From cfe3b0005f6e6e2b5384851a153ee831bad56d44 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 1 May 2021 13:18:21 -0700
Subject: [PATCH] [RISCV] Reorder masked builtin operands. Use
 clang_builtin_alias for all overloaded vector builtins.

This patch makes the builtin operand order match the C operand order
for all intrinsics. With this we can use clang_builtin_alias for all
overloaded intrinsics. This should further reduce the test time for
vector intrinsics.

Differential Revision: https://reviews.llvm.org/D101700
---
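To make the reordering concrete (an illustrative example using the overloaded
API and the operand names from the tests below; not itself part of the diff):

  #include <riscv_vector.h>

  // The C/C++ operand order for a masked operation is
  // (mask, maskedoff, op1, op2, vl). The builtin now takes its operands in
  // this same order, and codegen moves the mask to just before vl to form
  // the IR call @llvm.riscv.vaadd.mask(maskedoff, op1, op2, mask, vl).
  vint8m1_t demo(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
                 vint8m1_t op2, size_t vl) {
    return vaadd(mask, maskedoff, op1, op2, vl);
  }

Previously the builtin operands were in the IR intrinsic's order and
PermuteOperands described the mapping back to the C/C++ signature; with the
two orders unified, the overloaded declarations can be plain
clang_builtin_alias declarations.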
 clang/include/clang/Basic/riscv_vector.td | 84 ++-
 .../RISCV/rvv-intrinsics-overloaded/vaadd.c | 176 ++---
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vamoadd.c | 140 ++--
 .../RISCV/rvv-intrinsics-overloaded/vamoand.c | 140 ++--
 .../RISCV/rvv-intrinsics-overloaded/vamomax.c | 140 ++--
 .../RISCV/rvv-intrinsics-overloaded/vamomin.c | 140 ++--
 .../RISCV/rvv-intrinsics-overloaded/vamoor.c | 140 ++--
 .../RISCV/rvv-intrinsics-overloaded/vamoswap.c | 210 +++---
 .../RISCV/rvv-intrinsics-overloaded/vamoxor.c | 140 ++--
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vasub.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vcompress.c | 106 +--
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vfabs.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfadd.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfclass.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfcvt.c | 108 +--
 .../RISCV/rvv-intrinsics-overloaded/vfdiv.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfirst.c | 14 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmacc.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmadd.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmax.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmerge.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmin.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmsac.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmsub.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmul.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfncvt.c | 104 +--
 .../RISCV/rvv-intrinsics-overloaded/vfneg.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfnmacc.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfnmadd.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfnmsac.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfnmsub.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrdiv.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrec7.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfredmax.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfredmin.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfredsum.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrsub.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfsgnj.c | 108 +--
 .../RISCV/rvv-intrinsics-overloaded/vfslide1down.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfslide1up.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfsqrt.c | 18 +-
 .../RISCV/rvv-intrinsics-overloaded/vfsub.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwadd.c | 32 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwcvt.c | 76 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwmacc.c | 16 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwmsac.c | 16 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwmul.c | 16 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwnmacc.c | 16 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwnmsac.c | 16 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwredsum.c | 20 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwsub.c | 32 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c | 44 +-
 .../RISCV/rvv-intrinsics-overloaded/viota.c | 44 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c | 106 +--
 .../RISCV/rvv-intrinsics-overloaded/vloxei.c | 382 +++++------
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c | 106 +--
 .../RISCV/rvv-intrinsics-overloaded/vluxei.c | 382 +++++------
 .../RISCV/rvv-intrinsics-overloaded/vmacc.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmadd.c | 176 ++---
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmerge.c | 194 +++---
 .../RISCV/rvv-intrinsics-overloaded/vmfeq.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vmfge.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vmfgt.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vmfle.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vmflt.c | 36 +-
 .../RISCV/rvv-intrinsics-overloaded/vmfne.c | 36 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmsbf.c | 14 +-
 .../RISCV/rvv-intrinsics-overloaded/vmseq.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmsge.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmsgt.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmsif.c | 14 +-
 .../RISCV/rvv-intrinsics-overloaded/vmsle.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmslt.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmsne.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vmsof.c | 14 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c | 440 ++++++------
 .../RISCV/rvv-intrinsics-overloaded/vnclip.c | 120 ++--
 .../RISCV/rvv-intrinsics-overloaded/vncvt.c | 60 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c | 44 +-
 .../RISCV/rvv-intrinsics-overloaded/vnmsac.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vnmsub.c | 176 ++---
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vnsra.c | 60 +-
 .../RISCV/rvv-intrinsics-overloaded/vnsrl.c | 60 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vpopc.c | 14 +-
 .../RISCV/rvv-intrinsics-overloaded/vredand.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vredmax.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vredmin.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vredor.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vredsum.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vredxor.c | 88 +--
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vrgather.c | 314 ++++-----
 .../RISCV/rvv-intrinsics-overloaded/vrsub.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vsadd.c | 176 ++---
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c | 226 +++---
 .../RISCV/rvv-intrinsics-overloaded/vsext.c | 56 +-
 .../RISCV/rvv-intrinsics-overloaded/vslide1down.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vslide1up.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vslidedown.c | 106 +--
 .../RISCV/rvv-intrinsics-overloaded/vslideup.c | 106 +--
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vsmul.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vsoxei.c | 764 ++++++++++-----------
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c | 88 +--
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c | 88 +--
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c | 212 +++---
 .../RISCV/rvv-intrinsics-overloaded/vssra.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vssrl.c | 88 +--
 .../RISCV/rvv-intrinsics-overloaded/vssub.c | 176 ++---
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vsuxei.c | 764 ++++++++++-----------
 .../RISCV/rvv-intrinsics-overloaded/vwadd.c | 240 +++----
 .../RISCV/rvv-intrinsics-overloaded/vwcvt.c | 60 +-
 .../RISCV/rvv-intrinsics-overloaded/vwmacc.c | 210 +++---
 .../RISCV/rvv-intrinsics-overloaded/vwmul.c | 180 ++---
 .../RISCV/rvv-intrinsics-overloaded/vwredsum.c | 72 +-
 .../RISCV/rvv-intrinsics-overloaded/vwsub.c | 240 +++----
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c | 176 ++---
 .../RISCV/rvv-intrinsics-overloaded/vzext.c | 56 +-
 clang/utils/TableGen/RISCVVEmitter.cpp | 106 +--
 128 files changed, 7068 insertions(+), 7090 deletions(-)

diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 0502441..46f1a31 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -189,12 +189,6 @@ class RVVBuiltin
   list<int> IntrinsicTypes = [];
 
-  // When the order of the parameters of clang builtin do not match the order of
-  // C/C++ api, we use permutation index to mapping the operand from clang
-  // builtin to C/C++. It is parameter of the unmasked version without VL
-  // operand. If empty, the default permutation is [0, 1, 2, ...].
-  list<int> PermuteOperands = [];
-
   // If these names are not empty, this is the ID of the LLVM intrinsic
   // we want to lower to.
   string IRName = NAME;
@@ -562,6 +556,8 @@ let HasNoMaskedOverloaded = false,
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
     }],
     ManualCodegenMask= [{
+      // Move mask to right before vl.
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       IntrinsicTypes = {ResultType, Ops[3]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -608,6 +604,8 @@ multiclass RVVVLEFFBuiltin<list<string> types> {
     }],
     ManualCodegenMask = [{
       {
+        // Move mask to right before vl.
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
         IntrinsicTypes = {ResultType, Ops[4]->getType()};
         Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
         Value *NewVL = Ops[2];
@@ -643,6 +641,8 @@ multiclass RVVVLSEBuiltin<list<string> types> {
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
     }],
     ManualCodegenMask= [{
+      // Move mask to right before vl.
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       IntrinsicTypes = {ResultType, Ops[4]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -661,6 +661,8 @@ multiclass RVVIndexedLoad<string op> {
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
     }],
     ManualCodegenMask = [{
+      // Move mask to right before vl.
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops[4]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
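Every masked-load ManualCodegenMask above performs the same motion. A minimal
standalone sketch of what that std::rotate call does to the operand list
(toy std::string stand-ins for the actual llvm::Value operands):

  #include <algorithm>
  #include <cassert>
  #include <string>
  #include <vector>

  int main() {
    // Builtin (C/C++) operand order for a masked unit-stride load:
    std::vector<std::string> Ops = {"mask", "maskedoff", "ptr", "vl"};
    // "Move mask to right before vl": rotate mask to the back of the
    // subrange [begin(), end() - 1), leaving vl in place.
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    // Intrinsic order expected by llvm.riscv.vle.mask and friends:
    assert((Ops == std::vector<std::string>{"maskedoff", "ptr", "mask", "vl"}));
  }

After the rotate, vl is the last operand, which is why the snippets take the
VL type from Ops[3] (or Ops[4] for the strided/indexed forms, which carry one
extra operand).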
@@ -680,16 +682,19 @@ multiclass RVVIndexedLoad<string op> {
 }
 
 let HasMaskedOffOperand = false,
-    PermuteOperands = [1, 0],// C/C++ Operand: (ptr, value, vl). Builtin: (value, ptr, vl)
     ManualCodegen = [{
+      // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
+      std::swap(Ops[0], Ops[1]);
       Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
       IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
     }],
     ManualCodegenMask= [{
+      // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
+      std::swap(Ops[0], Ops[2]);
       Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
       IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
     }] in {
-  class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0mPUe", "c"> {
+  class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> {
     let Name = "vse1_v";
     let IRName = "vse1";
     let HasMask = false;
@@ -699,9 +704,9 @@ let HasMaskedOffOperand = false,
     IRName = "vse",
     IRNameMask = "vse_mask" in {
   foreach type = types in {
-    def : RVVBuiltin<"v", "0vPe", type>;
+    def : RVVBuiltin<"v", "0Pev", type>;
     if !not(IsFloat<type>.val) then {
-      def : RVVBuiltin<"Uv", "0UvPUe", type>;
+      def : RVVBuiltin<"Uv", "0PUeUv", type>;
     }
   }
 }
@@ -713,19 +718,22 @@ multiclass RVVVSSEBuiltin<list<string> types> {
     IRName = "vsse",
     IRNameMask = "vsse_mask",
     HasMaskedOffOperand = false,
-    PermuteOperands = [1, 2, 0],// C/C++ Operand: (ptr, stride, value, vl). Builtin: (value, ptr, stride, vl)
     ManualCodegen = [{
+      // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
+      std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
       Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
       IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
     }],
     ManualCodegenMask= [{
+      // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl)
+      std::swap(Ops[0], Ops[3]);
       Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
       IntrinsicTypes = {Ops[0]->getType(), Ops[4]->getType()};
     }] in {
   foreach type = types in {
-    def : RVVBuiltin<"v", "0vPet", type>;
+    def : RVVBuiltin<"v", "0Petv", type>;
     if !not(IsFloat<type>.val) then {
-      def : RVVBuiltin<"Uv", "0UvPUet", type>;
+      def : RVVBuiltin<"Uv", "0PUetUv", type>;
     }
   }
 }
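For readers decoding the prototype strings: per the type-transformer legend
near the top of riscv_vector.td (not shown in this diff), "0" is a void
return, "P" a pointer, "e" the element type, "U" unsigned, "t" ptrdiff_t,
"v" the vector type, and "m" the mask; the vl operand is appended separately
when the builtin takes a VL. So the new "0Petv" spells the C/C++ store order.
As an illustration for type 'c' (int8) at LMUL=1:

  // New "0Petv": (base, stride, value), i.e. the C/C++ signature:
  void vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value,
                    size_t vl);
  // The old "0vPet" spelled the IR order (value, base, stride), which is
  // why PermuteOperands = [1, 2, 0] was needed to recover the C/C++ order.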
@@ -733,12 +741,15 @@ multiclass RVVIndexedStore<string op> {
   let HasMaskedOffOperand = false,
-      PermuteOperands = [1, 2, 0],// C/C++ Operand: (ptr, index, value, vl). Builtin: (value, ptr, index, vl)
       ManualCodegen = [{
+        // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
+        std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
         Ops[1] = Builder.CreateBitCast(Ops[1],Ops[0]->getType()->getPointerTo());
         IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()};
       }],
       ManualCodegenMask= [{
+        // Builtin: (mask, ptr, index, value, vl). Intrinsic: (value, ptr, index, mask, vl)
+        std::swap(Ops[0], Ops[3]);
         Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
         IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
       }] in {
@@ -747,9 +758,9 @@ multiclass RVVIndexedStore<string op> {
       defvar eew = eew_list[0];
       defvar eew_type = eew_list[1];
       let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
-        def : RVVBuiltin<"v", "0vPe" # eew_type # "Uv", type>;
+        def : RVVBuiltin<"v", "0Pe" # eew_type # "Uvv", type>;
         if !not(IsFloat<type>.val) then {
-          def : RVVBuiltin<"Uv", "0UvPUe" # eew_type # "Uv", type>;
+          def : RVVBuiltin<"Uv", "0PUe" # eew_type # "UvUv", type>;
         }
       }
     }
@@ -774,6 +785,7 @@ multiclass RVVAMOBuiltinSet
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
     }],
     ManualCodegenMask = [{
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType()};
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
     }] in {
@@ -801,6 +813,7 @@ multiclass RVVPseudoUnaryBuiltin
     }],
     ManualCodegenMask = [{
       {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           cast<llvm::VectorType>(ResultType)->getElementType(),
@@ -830,6 +843,7 @@ multiclass RVVPseudoVNotBuiltin
     }],
     ManualCodegenMask = [{
       {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           cast<llvm::VectorType>(ResultType)->getElementType(),
@@ -876,6 +890,7 @@ multiclass RVVPseudoVFUnaryBuiltin
     }],
     ManualCodegenMask = [{
       {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -907,6 +922,7 @@ multiclass RVVPseudoVWCVTBuiltin
     }],
     ManualCodegenMask = [{
       {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -941,6 +957,7 @@ multiclass RVVPseudoVNCVTBuiltin
     }],
     ManualCodegenMask = [{
       {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -1216,9 +1233,17 @@ defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
 }
 
 // 12.15. Vector Integer Merge Instructions
-// C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMask = false, PermuteOperands = [2, 0, 1] in {
-  defm vmerge : RVVCarryinBuiltinSet;
+// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
+let HasMask = false,
+    ManualCodegen = [{
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
+      IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+    }] in {
+  defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
+                                    [["vvm", "v", "vmvv"],
+                                     ["vxm", "v", "vmve"],
+                                     ["vvm", "Uv", "UvmUvUv"],
+                                     ["vxm", "Uv", "UvmUvUe"]]>;
 }
 
 // 12.16. Vector Integer Move Instructions
@@ -1335,11 +1360,15 @@ let Name = "vfclass_v" in
 
 // 14.15. Vector Floating-Point Merge Instruction
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMask = false, PermuteOperands = [2, 0, 1] in {
+let HasMask = false,
+    ManualCodegen = [{
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
+      IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+    }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vfmerge", "fd",
-                                    [["vvm", "v", "vvvm"]]>;
+                                    [["vvm", "v", "vmvv"]]>;
   defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "fd",
-                                     [["vfm", "v", "vvem"]]>;
+                                     [["vfm", "v", "vmve"]]>;
 }
 
 // 14.16. Vector Floating-Point Move Instruction
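vmerge, vfmerge, and vcompress carry no maskedoff operand, so here the rotate
moves the leading mask past exactly two operands: builtin (mask, op1, op2, vl)
becomes intrinsic (op1, op2, mask, vl), matching the new "vmvv"/"vmve"
prototypes. At the C level (illustrative, with the operand names used in the
vmerge.c test):

  #include <riscv_vector.h>

  vint8m1_t demo_merge(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
                       size_t vl) {
    // C/C++ order (mask, op1, op2, vl); the rotate above lowers this to
    // @llvm.riscv.vmerge(op1, op2, mask, vl).
    return vmerge(mask, op1, op2, vl);
  }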
@@ -1503,13 +1532,18 @@ defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
                                      [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
 
 // 17.5. Vector Compress Instruction
-let HasMask = false, PermuteOperands = [2, 0, 1] in {
+let HasMask = false,
+    ManualCodegen = [{
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
+      ID = Intrinsic::riscv_vcompress;
+      IntrinsicTypes = {ResultType, Ops[3]->getType()};
+    }] in {
   // signed and floating type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csilfd",
-                                    [["vm", "v", "vvvm"]]>;
+                                    [["vm", "v", "vmvv"]]>;
   // unsigned type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
-                                    [["vm", "Uv", "UvUvUvm"]]>;
+                                    [["vm", "Uv", "UvmUvUv"]]>;
 }
 
 // Miscellaneous
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
index cc3a9e3..1f8361a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -890,7 +890,7 @@ vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -901,7 +901,7 @@ vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -912,7 +912,7 @@ vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -923,7 +923,7 @@ vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -934,7 +934,7 @@ vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]],
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -945,7 +945,7 @@ vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -956,7 +956,7 @@ vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -967,7 +967,7 @@ vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -978,7 +978,7 @@ vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -989,7 +989,7 @@ vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1000,7 +1000,7 @@ vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1011,7 +1011,7 @@ vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1022,7 +1022,7 @@ vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1033,7 +1033,7 @@ vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1044,7 +1044,7 @@ vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1056,7 +1056,7 @@ vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1067,7 +1067,7 @@ vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1079,7 +1079,7 @@ vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1090,7 +1090,7 @@ vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1101,7 +1101,7 @@ vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1112,7 +1112,7 @@ vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1123,7 +1123,7 @@ vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1134,7 +1134,7 @@ vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // 
CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1145,7 +1145,7 @@ vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1156,7 +1156,7 @@ vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1167,7 +1167,7 @@ vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1178,7 +1178,7 @@ vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1190,7 +1190,7 @@ vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ 
-1201,7 +1201,7 @@ vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1212,7 +1212,7 @@ vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1223,7 +1223,7 @@ vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1234,7 +1234,7 @@ vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1245,7 +1245,7 @@ vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1256,7 +1256,7 @@ vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1267,7 +1267,7 @@ vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1278,7 +1278,7 @@ vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1289,7 +1289,7 @@ vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1300,7 +1300,7 @@ vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1311,7 +1311,7 @@ vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1322,7 +1322,7 @@ vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1333,7 +1333,7 @@ vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1344,7 +1344,7 @@ vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1355,7 +1355,7 @@ vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1366,7 +1366,7 @@ vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1377,7 +1377,7 @@ vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1389,7 +1389,7 @@ vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1400,7 +1400,7 @@ vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1412,7 +1412,7 @@ vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1423,7 +1423,7 @@ vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1435,7 +1435,7 @@ vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1446,7 +1446,7 @@ vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -1457,7 +1457,7 @@ vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -1468,7 +1468,7 @@ vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -1479,7 +1479,7 @@ vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -1490,7 +1490,7 @@ vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -1501,7 +1501,7 @@ vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -1512,7 +1512,7 @@ vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -1523,7 +1523,7 @@ vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -1534,7 +1534,7 @@ vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1546,7 +1546,7 @@ vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1558,7 +1558,7 @@ vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1570,7 +1570,7 @@ vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1582,7 +1582,7 @@ vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t 
maskedoff, @@ -1594,7 +1594,7 @@ vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1605,7 +1605,7 @@ vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1617,7 +1617,7 @@ vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1628,7 +1628,7 @@ vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1640,7 +1640,7 @@ vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1651,7 +1651,7 @@ vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1663,7 +1663,7 @@ vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1674,7 +1674,7 @@ vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1686,7 +1686,7 @@ vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1698,7 +1698,7 @@ vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1710,7 +1710,7 @@ vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1721,7 +1721,7 @@ vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1733,7 +1733,7 @@ vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1744,7 +1744,7 @@ vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1756,7 +1756,7 @@ vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1767,7 +1767,7 @@ vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1779,7 +1779,7 @@ vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1790,7 +1790,7 @@ vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1802,7 +1802,7 @@ vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1813,7 +1813,7 @@ vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1825,7 +1825,7 @@ vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1836,7 +1836,7 @@ vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1848,7 +1848,7 @@ vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1859,7 +1859,7 @@ 
vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -1871,7 +1871,7 @@ vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c index bcd1557..ae3a230 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c @@ -888,7 +888,7 @@ vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -898,7 +898,7 @@ vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -908,7 +908,7 @@ vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -918,7 +918,7 @@ vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // 
CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -928,7 +928,7 @@ vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -938,7 +938,7 @@ vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -948,7 +948,7 @@ vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -958,7 +958,7 @@ vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -968,7 +968,7 @@ vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -978,7 +978,7 @@ vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -988,7 +988,7 @@ vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -998,7 +998,7 @@ vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1008,7 +1008,7 @@ vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1018,7 +1018,7 @@ vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1028,7 +1028,7 @@ vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // 
CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1038,7 +1038,7 @@ vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1048,7 +1048,7 @@ vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1058,7 +1058,7 @@ vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1068,7 +1068,7 @@ vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1078,7 +1078,7 @@ vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1088,7 +1088,7 @@ vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1098,7 +1098,7 @@ vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1108,7 +1108,7 @@ vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1118,7 +1118,7 @@ vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1128,7 +1128,7 @@ vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, 
vint16m8_t op2, size_t vl) { @@ -1138,7 +1138,7 @@ vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1148,7 +1148,7 @@ vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1158,7 +1158,7 @@ vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1168,7 +1168,7 @@ vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1178,7 +1178,7 @@ vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1188,7 +1188,7 @@ vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1198,7 +1198,7 @@ vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1208,7 +1208,7 @@ vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1218,7 +1218,7 @@ vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1228,7 +1228,7 @@ vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1238,7 +1238,7 @@ vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1248,7 +1248,7 @@ vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1258,7 +1258,7 @@ vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1268,7 +1268,7 @@ vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1278,7 +1278,7 @@ vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1288,7 +1288,7 @@ vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1298,7 +1298,7 @@ vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // 
CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1308,7 +1308,7 @@ vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1318,7 +1318,7 @@ vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1328,7 +1328,7 @@ vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1338,7 +1338,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1348,7 +1348,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1358,7 +1358,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1368,7 +1368,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1378,7 +1378,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1388,7 +1388,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1398,7 +1398,7 @@ vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1408,7 +1408,7 @@ 
vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1418,7 +1418,7 @@ vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1428,7 +1428,7 @@ vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1438,7 +1438,7 @@ vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1448,7 +1448,7 @@ vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1458,7 +1458,7 @@ vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1468,7 +1468,7 @@ vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1478,7 +1478,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1488,7 +1488,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1498,7 +1498,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1508,7 +1508,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1528,7 +1528,7 @@ vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1538,7 +1538,7 @@ vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1548,7 +1548,7 @@ vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1558,7 +1558,7 @@ vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1568,7 +1568,7 @@ vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1578,7 +1578,7 @@ vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1588,7 +1588,7 @@ vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1598,7 +1598,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1608,7 +1608,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1618,7 +1618,7 @@ vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1628,7 +1628,7 @@ vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1638,7 +1638,7 @@ vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1648,7 +1648,7 @@ vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1658,7 +1658,7 @@ vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1668,7 +1668,7 @@ vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1678,7 +1678,7 @@ vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1688,7 +1688,7 @@ vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1698,7 +1698,7 @@ vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -1708,7 +1708,7 @@ vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1718,7 +1718,7 @@ vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -1728,7 +1728,7 @@ vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1738,7 +1738,7 @@ vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -1748,7 +1748,7 @@ vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1758,7 +1758,7 @@ vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
 //
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
index 92a9918..49ceb9a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
@@ -778,7 +778,7 @@ vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6
 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
@@ -789,7 +789,7 @@ vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base,
vuint8mf8 // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -800,7 +800,7 @@ vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -811,7 +811,7 @@ vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -822,7 +822,7 @@ vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -833,7 +833,7 @@ vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ 
-844,7 +844,7 @@ vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -855,7 +855,7 @@ vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -866,7 +866,7 @@ vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -877,7 +877,7 @@ vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -888,7 +888,7 @@ vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t 
test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -899,7 +899,7 @@ vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -910,7 +910,7 @@ vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -921,7 +921,7 @@ vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -932,7 +932,7 @@ vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -943,7 +943,7 @@ vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -954,7 +954,7 @@ vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -965,7 +965,7 @@ vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -976,7 +976,7 @@ vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -987,7 +987,7 @@ vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -998,7 +998,7 @@ vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -1009,7 +1009,7 @@ vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1020,7 +1020,7 @@ vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1031,7 +1031,7 @@ vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1042,7 +1042,7 @@ vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1053,7 +1053,7 @@ vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1064,7 +1064,7 @@ vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1075,7 +1075,7 @@ vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1086,7 +1086,7 @@ vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2 // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1097,7 +1097,7 @@ vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1108,7 +1108,7 @@ vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1119,7 +1119,7 @@ vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1130,7 +1130,7 @@ vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_ // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1141,7 +1141,7 @@ vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1152,7 +1152,7 @@ vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1163,7 +1163,7 @@ vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, 
int64_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1174,7 +1174,7 @@ vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1185,7 +1185,7 @@ vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1196,7 +1196,7 @@ vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1207,7 +1207,7 @@ vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, 
vuint32m8_t value, size_t vl) { @@ -1218,7 +1218,7 @@ vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1229,7 +1229,7 @@ vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1240,7 +1240,7 @@ vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1251,7 +1251,7 @@ vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1262,7 +1262,7 @@ vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2 // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1273,7 +1273,7 @@ vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1284,7 +1284,7 @@ vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1295,7 +1295,7 @@ vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1306,7 +1306,7 @@ vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1317,7 +1317,7 @@ vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1328,7 +1328,7 @@ vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1339,7 +1339,7 @@ vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1361,7 +1361,7 @@ vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1372,7 +1372,7 @@ vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -1383,7 +1383,7 @@ vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -1394,7 +1394,7 @@ vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -1405,7 +1405,7 @@ vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2 // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -1416,7 +1416,7 @@ vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -1427,7 +1427,7 @@ vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -1438,7 +1438,7 @@ vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -1449,7 +1449,7 @@ vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1460,7 +1460,7 @@ vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2 // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1471,7 +1471,7 @@ vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1482,7 +1482,7 @@ vuint64m2_t test_vamoaddei32_v_u64m2_m 
(vbool32_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1493,7 +1493,7 @@ vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1504,7 +1504,7 @@ vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4 // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1515,7 +1515,7 @@ vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -1526,7 +1526,7 @@ vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t 
*base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -1537,7 +1537,7 @@ vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c index 0fb76be..0a71bf8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c @@ -778,7 +778,7 @@ vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -789,7 +789,7 @@ vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8 // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -800,7 +800,7 @@ vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -811,7 +811,7 @@ vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -822,7 +822,7 @@ vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -833,7 +833,7 @@ vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b // CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -844,7 +844,7 @@ vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -855,7 +855,7 @@ vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -866,7 +866,7 @@ vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4_m( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -877,7 +877,7 @@ vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -888,7 +888,7 @@ vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t // CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -899,7 +899,7 @@ vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -910,7 +910,7 @@ vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -921,7 +921,7 @@ vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t 
mask, int32_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -932,7 +932,7 @@ vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -943,7 +943,7 @@ vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t // CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -954,7 +954,7 @@ vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -965,7 +965,7 @@ vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t 
bindex, vint32m2_t value, size_t vl) { @@ -976,7 +976,7 @@ vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -987,7 +987,7 @@ vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -998,7 +998,7 @@ vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -1009,7 +1009,7 @@ vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1020,7 +1020,7 @@ vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] 
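/* A minimal usage sketch, not part of the generated checks; it assumes
   <riscv_vector.h> is included, and the helper name
   `example_vamoand_masked` is hypothetical. It illustrates the C operand
   order these masked overloaded tests exercise after this patch: the mask
   leads, followed by base, bindex, value, and vl, so the builtin operand
   order matches the C operand order and clang_builtin_alias can forward
   the overloaded name directly. */
vint64m8_t example_vamoand_masked(vbool8_t mask, int64_t *base,
                                  vuint8m1_t bindex, vint64m8_t value,
                                  size_t vl) {
  // Overloaded masked form: the index-element width (ei8) and the result
  // type are deduced from the argument types, as in the tests around this
  // sketch.
  return vamoandei8(mask, base, bindex, value, vl);
}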
// vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1031,7 +1031,7 @@ vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1042,7 +1042,7 @@ vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1053,7 +1053,7 @@ vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1064,7 +1064,7 @@ vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1075,7 +1075,7 @@ vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1086,7 +1086,7 @@ vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2 // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1097,7 +1097,7 @@ vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1108,7 +1108,7 @@ vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1119,7 +1119,7 @@ vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1130,7 +1130,7 @@ vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_ // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1141,7 +1141,7 @@ vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1152,7 +1152,7 @@ vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1163,7 +1163,7 @@ vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1174,7 +1174,7 @@ vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1185,7 +1185,7 @@ vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1196,7 +1196,7 @@ vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2 // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1207,7 +1207,7 @@ vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1218,7 +1218,7 @@ vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t // CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1229,7 +1229,7 @@ vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1240,7 +1240,7 @@ vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1251,7 +1251,7 @@ vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1262,7 +1262,7 @@ vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2 // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1273,7 +1273,7 @@ vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1284,7 +1284,7 @@ vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1295,7 +1295,7 @@ 
vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1306,7 +1306,7 @@ vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1317,7 +1317,7 @@ vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4 // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1328,7 +1328,7 @@ vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1339,7 +1339,7 @@ vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6 // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t 
test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1361,7 +1361,7 @@ vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1372,7 +1372,7 @@ vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -1383,7 +1383,7 @@ vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -1394,7 +1394,7 @@ vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -1405,7 +1405,7 @@ vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2 // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -1416,7 +1416,7 @@ vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -1427,7 +1427,7 @@ vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -1438,7 +1438,7 @@ vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -1449,7 +1449,7 @@ vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1460,7 +1460,7 @@ vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2 // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1471,7 +1471,7 @@ vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1482,7 +1482,7 @@ vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1493,7 +1493,7 @@ vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1504,7 +1504,7 @@ vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4 // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1515,7 +1515,7 @@ vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -1526,7 +1526,7 @@ vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -1537,7 +1537,7 @@ vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c index 35f11cc..4863c11b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c @@ -778,7 +778,7 @@ vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -789,7 +789,7 @@ vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8 // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -800,7 +800,7 @@ vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -811,7 +811,7 @@ vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -822,7 +822,7 @@ vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -833,7 +833,7 @@ vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -844,7 +844,7 @@ vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -855,7 +855,7 @@ vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -866,7 +866,7 @@ vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -877,7 +877,7 @@ vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -888,7 +888,7 @@ vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -899,7 +899,7 @@ vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -910,7 +910,7 @@ vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -921,7 +921,7 @@ vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -932,7 +932,7 @@ vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -943,7 +943,7 @@ vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] 
to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -954,7 +954,7 @@ vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -965,7 +965,7 @@ vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -976,7 +976,7 @@ vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -987,7 +987,7 @@ vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -998,7 +998,7 @@ vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t // CHECK-RV64-LABEL: 
@test_vamomaxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -1009,7 +1009,7 @@ vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1020,7 +1020,7 @@ vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1031,7 +1031,7 @@ vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1042,7 +1042,7 @@ vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1053,7 +1053,7 @@ 
vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1064,7 +1064,7 @@ vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1075,7 +1075,7 @@ vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1086,7 +1086,7 @@ vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2 // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1097,7 +1097,7 @@ vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamomaxei32_v_i64m4_m 
(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1108,7 +1108,7 @@ vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1119,7 +1119,7 @@ vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1130,7 +1130,7 @@ vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_ // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1141,7 +1141,7 @@ vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1152,7 +1152,7 @@ vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1163,7 +1163,7 @@ vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1174,7 +1174,7 @@ vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8 // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1185,7 +1185,7 @@ vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1196,7 +1196,7 @@ vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1207,7 +1207,7 @@ vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_ // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1218,7 +1218,7 @@ vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_ // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1229,7 +1229,7 @@ vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1240,7 +1240,7 @@ vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1251,7 +1251,7 @@ vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1262,7 +1262,7 @@ vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1273,7 +1273,7 @@ vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1284,7 +1284,7 @@ vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1295,7 +1295,7 @@ vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1306,7 +1306,7 @@ vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1317,7 +1317,7 @@ vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m // 
CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1328,7 +1328,7 @@ vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1339,7 +1339,7 @@ vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1361,7 +1361,7 @@ vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t 
bindex, vuint32m4_t value, size_t vl) { @@ -1372,7 +1372,7 @@ vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -1383,7 +1383,7 @@ vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -1394,7 +1394,7 @@ vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -1405,7 +1405,7 @@ vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -1416,7 +1416,7 @@ vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_ // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -1427,7 +1427,7 @@ vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -1438,7 +1438,7 @@ vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -1449,7 +1449,7 @@ vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1460,7 +1460,7 @@ vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1471,7 +1471,7 @@ vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1482,7 +1482,7 @@ vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1493,7 +1493,7 @@ vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1504,7 +1504,7 @@ vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1515,7 +1515,7 @@ vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -1526,7 +1526,7 @@ vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
@@ -1537,7 +1537,7 @@ vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64
 // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vamomaxuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
index f875de1..eba57e5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
@@ -778,7 +778,7 @@ vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
@@ -789,7 +789,7 @@ vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
@@ -800,7 +800,7 @@ vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
 // CHECK-RV64-LABEL: @test_vamominei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret
[[TMP1]] // vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -811,7 +811,7 @@ vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamominei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -822,7 +822,7 @@ vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamominei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -833,7 +833,7 @@ vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b // CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -844,7 +844,7 @@ vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamominei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -855,7 +855,7 @@ vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamominei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -866,7 +866,7 @@ vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamominei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -877,7 +877,7 @@ vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamominei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -888,7 +888,7 @@ vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t // CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -899,7 +899,7 @@ vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamominei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -910,7 +910,7 @@ vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamominei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -921,7 +921,7 @@ vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamominei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -932,7 +932,7 @@ vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamominei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -943,7 +943,7 @@ vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t // CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -954,7 +954,7 @@ vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamominei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -965,7 +965,7 @@ vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamominei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -976,7 +976,7 @@ vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamominei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -987,7 +987,7 @@ vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamominei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -998,7 +998,7 @@ vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t // CHECK-RV64-LABEL: @test_vamominei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -1009,7 +1009,7 @@ vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamominei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1020,7 +1020,7 @@ vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamominei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1031,7 +1031,7 @@ vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamominei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1042,7 +1042,7 @@ vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1053,7 +1053,7 @@ vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamominei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1064,7 +1064,7 @@ vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamominei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1075,7 +1075,7 @@ vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, 
vuint16m2_t // CHECK-RV64-LABEL: @test_vamominei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1086,7 +1086,7 @@ vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2 // CHECK-RV64-LABEL: @test_vamominei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1097,7 +1097,7 @@ vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamominei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1108,7 +1108,7 @@ vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamominei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1119,7 +1119,7 @@ vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamominei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t 
value, size_t vl) { @@ -1130,7 +1130,7 @@ vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_ // CHECK-RV64-LABEL: @test_vamominei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1141,7 +1141,7 @@ vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamominei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1152,7 +1152,7 @@ vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamominei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1163,7 +1163,7 @@ vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1174,7 +1174,7 @@ vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8 // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1185,7 +1185,7 @@ vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1196,7 +1196,7 @@ vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1207,7 +1207,7 @@ vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_ // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1218,7 +1218,7 @@ vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_ // CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1229,7 +1229,7 @@ vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1240,7 +1240,7 @@ vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1251,7 +1251,7 @@ vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1262,7 +1262,7 @@ vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1273,7 +1273,7 @@ vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1284,7 +1284,7 @@ vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1295,7 +1295,7 @@ vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1306,7 +1306,7 @@ vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1317,7 +1317,7 @@ vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1328,7 +1328,7 @@ vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1339,7 +1339,7 @@ vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint // CHECK-RV64-LABEL: 
@test_vamominuei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1361,7 +1361,7 @@ vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1372,7 +1372,7 @@ vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -1383,7 +1383,7 @@ vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, 
size_t vl) { @@ -1394,7 +1394,7 @@ vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -1405,7 +1405,7 @@ vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -1416,7 +1416,7 @@ vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_ // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -1427,7 +1427,7 @@ vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -1438,7 +1438,7 @@ vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -1449,7 +1449,7 @@ vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1460,7 +1460,7 @@ vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1471,7 +1471,7 @@ vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1482,7 +1482,7 @@ vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1493,7 +1493,7 @@ vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1504,7 +1504,7 @@ vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1515,7 +1515,7 @@ vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -1526,7 +1526,7 @@ vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -1537,7 +1537,7 @@ vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamominuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c index 379f154..56cc72e0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c @@ 
-778,7 +778,7 @@ vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64 // CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -789,7 +789,7 @@ vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_ // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -800,7 +800,7 @@ vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -811,7 +811,7 @@ vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -822,7 +822,7 @@ vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bi // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, 
int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -833,7 +833,7 @@ vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bi // CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -844,7 +844,7 @@ vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -855,7 +855,7 @@ vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_ // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -866,7 +866,7 @@ vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -877,7 +877,7 @@ vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -888,7 +888,7 @@ vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t // CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -899,7 +899,7 @@ vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -910,7 +910,7 @@ vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -921,7 +921,7 @@ vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -932,7 +932,7 @@ vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -943,7 +943,7 @@ vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t // CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -954,7 +954,7 @@ vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1 // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -965,7 +965,7 @@ vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -976,7 +976,7 @@ vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -987,7 +987,7 @@ vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -998,7 +998,7 @@ vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -1009,7 +1009,7 @@ vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1020,7 +1020,7 @@ vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1031,7 +1031,7 @@ vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bi // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1042,7 +1042,7 @@ vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_ // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1053,7 +1053,7 @@ vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_ // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1064,7 +1064,7 @@ vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1075,7 +1075,7 @@ vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1086,7 +1086,7 @@ vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_ // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1097,7 +1097,7 @@ vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1108,7 +1108,7 @@ vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1119,7 +1119,7 @@ vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1130,7 +1130,7 @@ vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1141,7 +1141,7 @@ vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1152,7 +1152,7 @@ vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t // 
CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1163,7 +1163,7 @@ vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1174,7 +1174,7 @@ vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1185,7 +1185,7 @@ vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_ // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1196,7 +1196,7 @@ vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_ // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1207,7 +1207,7 @@ 
vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1218,7 +1218,7 @@ vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t // CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1229,7 +1229,7 @@ vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1240,7 +1240,7 @@ vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1251,7 +1251,7 @@ vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1 // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoorei16_v_u32m4_m 
(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1262,7 +1262,7 @@ vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_ // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1273,7 +1273,7 @@ vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_ // CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1284,7 +1284,7 @@ vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1295,7 +1295,7 @@ vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1306,7 +1306,7 @@ vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2 // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1317,7 +1317,7 @@ vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_ // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1328,7 +1328,7 @@ vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_ // CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1339,7 +1339,7 @@ vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1361,7 +1361,7 @@ vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4 // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1372,7 +1372,7 @@ vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_ // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -1383,7 +1383,7 @@ vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_ // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -1394,7 +1394,7 @@ vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_ // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -1405,7 +1405,7 @@ vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_ // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -1416,7 +1416,7 @@ vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -1427,7 +1427,7 @@ vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -1438,7 +1438,7 @@ vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -1449,7 +1449,7 @@ vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1 // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1460,7 +1460,7 @@ vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_ // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1471,7 +1471,7 @@ vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1482,7 +1482,7 @@ vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1493,7 +1493,7 @@ vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2 // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1504,7 +1504,7 @@ vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_ // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1515,7 +1515,7 @@ vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -1526,7 +1526,7 @@ vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2 // CHECK-RV64-LABEL: 
@test_vamoorei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -1537,7 +1537,7 @@ vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4 // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c index fe8957e..9a7d81d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c @@ -1269,7 +1269,7 @@ vfloat64m8_t test_vamoswapei64_v_f64m8(double *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei8_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -1282,7 +1282,7 @@ vint32mf2_t test_vamoswapei8_v_i32mf2_m(vbool64_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei8_v_i32m1_m(vbool32_t mask, int32_t *base, @@ -1295,7 +1295,7 @@ vint32m1_t test_vamoswapei8_v_i32m1_m(vbool32_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei8_v_i32m2_m(vbool16_t mask, int32_t *base, @@ -1308,7 +1308,7 @@ vint32m2_t test_vamoswapei8_v_i32m2_m(vbool16_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei8_v_i32m4_m(vbool8_t mask, int32_t *base, @@ -1321,7 +1321,7 @@ vint32m4_t test_vamoswapei8_v_i32m4_m(vbool8_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei8_v_i32m8_m(vbool4_t mask, int32_t *base, @@ -1334,7 +1334,7 @@ vint32m8_t test_vamoswapei8_v_i32m8_m(vbool4_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei16_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -1347,7 +1347,7 @@ vint32mf2_t test_vamoswapei16_v_i32mf2_m(vbool64_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei16_v_i32m1_m(vbool32_t mask, int32_t *base, @@ -1360,7 +1360,7 @@ vint32m1_t test_vamoswapei16_v_i32m1_m(vbool32_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei16_v_i32m2_m(vbool16_t mask, int32_t *base, @@ -1373,7 +1373,7 @@ vint32m2_t test_vamoswapei16_v_i32m2_m(vbool16_t mask, 
int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei16_v_i32m4_m(vbool8_t mask, int32_t *base, @@ -1386,7 +1386,7 @@ vint32m4_t test_vamoswapei16_v_i32m4_m(vbool8_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei16_v_i32m8_m(vbool4_t mask, int32_t *base, @@ -1399,7 +1399,7 @@ vint32m8_t test_vamoswapei16_v_i32m8_m(vbool4_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei32_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -1412,7 +1412,7 @@ vint32mf2_t test_vamoswapei32_v_i32mf2_m(vbool64_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei32_v_i32m1_m(vbool32_t mask, int32_t *base, @@ -1425,7 +1425,7 @@ vint32m1_t test_vamoswapei32_v_i32m1_m(vbool32_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei32_v_i32m2_m(vbool16_t mask, int32_t *base, @@ -1438,7 +1438,7 @@ vint32m2_t test_vamoswapei32_v_i32m2_m(vbool16_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei32_v_i32m4_m(vbool8_t mask, int32_t *base, @@ -1451,7 +1451,7 @@ vint32m4_t test_vamoswapei32_v_i32m4_m(vbool8_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei32_v_i32m8_m(vbool4_t mask, int32_t *base, @@ -1464,7 +1464,7 @@ vint32m8_t test_vamoswapei32_v_i32m8_m(vbool4_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei64_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -1477,7 +1477,7 @@ vint32mf2_t test_vamoswapei64_v_i32mf2_m(vbool64_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei64_v_i32m1_m(vbool32_t mask, int32_t *base, @@ -1490,7 +1490,7 @@ vint32m1_t test_vamoswapei64_v_i32m1_m(vbool32_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei64_v_i32m2_m(vbool16_t mask, int32_t *base, @@ -1503,7 +1503,7 @@ vint32m2_t test_vamoswapei64_v_i32m2_m(vbool16_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei64_v_i32m4_m(vbool8_t mask, int32_t *base, @@ -1516,7 +1516,7 @@ vint32m4_t test_vamoswapei64_v_i32m4_m(vbool8_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei8_v_i64m1_m(vbool64_t mask, int64_t *base, @@ -1529,7 +1529,7 @@ vint64m1_t test_vamoswapei8_v_i64m1_m(vbool64_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei8_v_i64m2_m(vbool32_t mask, int64_t *base, @@ -1542,7 +1542,7 @@ vint64m2_t test_vamoswapei8_v_i64m2_m(vbool32_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoswapei8_v_i64m4_m(vbool16_t mask, int64_t *base, @@ -1555,7 +1555,7 @@ vint64m4_t test_vamoswapei8_v_i64m4_m(vbool16_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoswapei8_v_i64m8_m(vbool8_t mask, int64_t *base, @@ -1568,7 +1568,7 @@ vint64m8_t test_vamoswapei8_v_i64m8_m(vbool8_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei16_v_i64m1_m(vbool64_t 
mask, int64_t *base, @@ -1581,7 +1581,7 @@ vint64m1_t test_vamoswapei16_v_i64m1_m(vbool64_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei16_v_i64m2_m(vbool32_t mask, int64_t *base, @@ -1594,7 +1594,7 @@ vint64m2_t test_vamoswapei16_v_i64m2_m(vbool32_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoswapei16_v_i64m4_m(vbool16_t mask, int64_t *base, @@ -1607,7 +1607,7 @@ vint64m4_t test_vamoswapei16_v_i64m4_m(vbool16_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoswapei16_v_i64m8_m(vbool8_t mask, int64_t *base, @@ -1620,7 +1620,7 @@ vint64m8_t test_vamoswapei16_v_i64m8_m(vbool8_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei32_v_i64m1_m(vbool64_t mask, int64_t *base, @@ -1633,7 +1633,7 @@ vint64m1_t test_vamoswapei32_v_i64m1_m(vbool64_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei32_v_i64m2_m(vbool32_t mask, int64_t *base, @@ -1646,7 +1646,7 @@ vint64m2_t test_vamoswapei32_v_i64m2_m(vbool32_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoswapei32_v_i64m4_m(vbool16_t mask, int64_t *base, @@ -1659,7 +1659,7 @@ vint64m4_t test_vamoswapei32_v_i64m4_m(vbool16_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoswapei32_v_i64m8_m(vbool8_t mask, int64_t *base, @@ -1672,7 +1672,7 @@ vint64m8_t test_vamoswapei32_v_i64m8_m(vbool8_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei64_v_i64m1_m(vbool64_t mask, int64_t *base, @@ -1685,7 +1685,7 @@ vint64m1_t test_vamoswapei64_v_i64m1_m(vbool64_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei64_v_i64m2_m(vbool32_t mask, int64_t *base, @@ -1698,7 +1698,7 @@ vint64m2_t test_vamoswapei64_v_i64m2_m(vbool32_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoswapei64_v_i64m4_m(vbool16_t mask, int64_t *base, @@ -1711,7 +1711,7 @@ vint64m4_t test_vamoswapei64_v_i64m4_m(vbool16_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoswapei64_v_i64m8_m(vbool8_t mask, int64_t *base, @@ -1724,7 +1724,7 @@ vint64m8_t test_vamoswapei64_v_i64m8_m(vbool8_t mask, int64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoswapei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, @@ -1737,7 +1737,7 @@ vuint32mf2_t test_vamoswapei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoswapei8_v_u32m1_m(vbool32_t mask, uint32_t *base, @@ -1750,7 +1750,7 @@ vuint32m1_t test_vamoswapei8_v_u32m1_m(vbool32_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoswapei8_v_u32m2_m(vbool16_t mask, uint32_t *base, @@ -1763,7 +1763,7 @@ vuint32m2_t test_vamoswapei8_v_u32m2_m(vbool16_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoswapei8_v_u32m4_m(vbool8_t mask, uint32_t *base, @@ -1776,7 +1776,7 @@ vuint32m4_t test_vamoswapei8_v_u32m4_m(vbool8_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoswapei8_v_u32m8_m(vbool4_t mask, uint32_t *base, @@ -1789,7 +1789,7 @@ vuint32m8_t test_vamoswapei8_v_u32m8_m(vbool4_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoswapei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, @@ -1802,7 +1802,7 @@ vuint32mf2_t test_vamoswapei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoswapei16_v_u32m1_m(vbool32_t mask, uint32_t *base, @@ -1815,7 +1815,7 @@ vuint32m1_t test_vamoswapei16_v_u32m1_m(vbool32_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoswapei16_v_u32m2_m(vbool16_t mask, uint32_t *base, @@ -1828,7 +1828,7 @@ vuint32m2_t test_vamoswapei16_v_u32m2_m(vbool16_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoswapei16_v_u32m4_m(vbool8_t mask, uint32_t *base, @@ -1841,7 +1841,7 @@ vuint32m4_t test_vamoswapei16_v_u32m4_m(vbool8_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoswapei16_v_u32m8_m(vbool4_t mask, uint32_t *base, @@ -1854,7 +1854,7 @@ 
vuint32m8_t test_vamoswapei16_v_u32m8_m(vbool4_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoswapei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, @@ -1867,7 +1867,7 @@ vuint32mf2_t test_vamoswapei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoswapei32_v_u32m1_m(vbool32_t mask, uint32_t *base, @@ -1880,7 +1880,7 @@ vuint32m1_t test_vamoswapei32_v_u32m1_m(vbool32_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoswapei32_v_u32m2_m(vbool16_t mask, uint32_t *base, @@ -1893,7 +1893,7 @@ vuint32m2_t test_vamoswapei32_v_u32m2_m(vbool16_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoswapei32_v_u32m4_m(vbool8_t mask, uint32_t *base, @@ -1906,7 +1906,7 @@ vuint32m4_t test_vamoswapei32_v_u32m4_m(vbool8_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoswapei32_v_u32m8_m(vbool4_t mask, uint32_t *base, @@ -1919,7 +1919,7 @@ vuint32m8_t test_vamoswapei32_v_u32m8_m(vbool4_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoswapei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, @@ -1932,7 +1932,7 @@ vuint32mf2_t test_vamoswapei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoswapei64_v_u32m1_m(vbool32_t mask, uint32_t *base, @@ -1945,7 +1945,7 @@ vuint32m1_t test_vamoswapei64_v_u32m1_m(vbool32_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoswapei64_v_u32m2_m(vbool16_t mask, uint32_t *base, @@ -1958,7 +1958,7 @@ vuint32m2_t test_vamoswapei64_v_u32m2_m(vbool16_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoswapei64_v_u32m4_m(vbool8_t mask, uint32_t *base, @@ -1971,7 +1971,7 @@ vuint32m4_t test_vamoswapei64_v_u32m4_m(vbool8_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoswapei8_v_u64m1_m(vbool64_t mask, uint64_t *base, @@ -1984,7 +1984,7 @@ vuint64m1_t test_vamoswapei8_v_u64m1_m(vbool64_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoswapei8_v_u64m2_m(vbool32_t mask, uint64_t *base, @@ -1997,7 +1997,7 @@ vuint64m2_t test_vamoswapei8_v_u64m2_m(vbool32_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoswapei8_v_u64m4_m(vbool16_t mask, uint64_t *base, @@ -2010,7 +2010,7 @@ vuint64m4_t test_vamoswapei8_v_u64m4_m(vbool16_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoswapei8_v_u64m8_m(vbool8_t mask, uint64_t *base, @@ -2023,7 +2023,7 @@ vuint64m8_t test_vamoswapei8_v_u64m8_m(vbool8_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoswapei16_v_u64m1_m(vbool64_t mask, uint64_t *base, @@ -2036,7 +2036,7 @@ vuint64m1_t test_vamoswapei16_v_u64m1_m(vbool64_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoswapei16_v_u64m2_m(vbool32_t mask, uint64_t *base, @@ -2049,7 +2049,7 @@ vuint64m2_t test_vamoswapei16_v_u64m2_m(vbool32_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoswapei16_v_u64m4_m(vbool16_t mask, uint64_t *base, @@ -2062,7 +2062,7 @@ vuint64m4_t test_vamoswapei16_v_u64m4_m(vbool16_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoswapei16_v_u64m8_m(vbool8_t mask, uint64_t *base, @@ -2075,7 +2075,7 @@ vuint64m8_t test_vamoswapei16_v_u64m8_m(vbool8_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoswapei32_v_u64m1_m(vbool64_t mask, uint64_t *base, @@ -2088,7 +2088,7 @@ vuint64m1_t test_vamoswapei32_v_u64m1_m(vbool64_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoswapei32_v_u64m2_m(vbool32_t mask, uint64_t *base, @@ -2101,7 +2101,7 @@ vuint64m2_t test_vamoswapei32_v_u64m2_m(vbool32_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoswapei32_v_u64m4_m(vbool16_t mask, uint64_t *base, @@ -2114,7 +2114,7 @@ vuint64m4_t test_vamoswapei32_v_u64m4_m(vbool16_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoswapei32_v_u64m8_m(vbool8_t mask, uint64_t *base, @@ 
-2127,7 +2127,7 @@ vuint64m8_t test_vamoswapei32_v_u64m8_m(vbool8_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoswapei64_v_u64m1_m(vbool64_t mask, uint64_t *base, @@ -2140,7 +2140,7 @@ vuint64m1_t test_vamoswapei64_v_u64m1_m(vbool64_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoswapei64_v_u64m2_m(vbool32_t mask, uint64_t *base, @@ -2153,7 +2153,7 @@ vuint64m2_t test_vamoswapei64_v_u64m2_m(vbool32_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoswapei64_v_u64m4_m(vbool16_t mask, uint64_t *base, @@ -2166,7 +2166,7 @@ vuint64m4_t test_vamoswapei64_v_u64m4_m(vbool16_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoswapei64_v_u64m8_m(vbool8_t mask, uint64_t *base, @@ -2179,7 +2179,7 @@ vuint64m8_t test_vamoswapei64_v_u64m8_m(vbool8_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vamoswapei8_v_f32mf2_m(vbool64_t mask, float *base, @@ -2192,7 +2192,7 @@ vfloat32mf2_t test_vamoswapei8_v_f32mf2_m(vbool64_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vamoswapei8_v_f32m1_m(vbool32_t mask, float *base, @@ -2205,7 +2205,7 @@ vfloat32m1_t test_vamoswapei8_v_f32m1_m(vbool32_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei8_v_f32m2_m(vbool16_t mask, float *base, @@ -2218,7 +2218,7 @@ vfloat32m2_t test_vamoswapei8_v_f32m2_m(vbool16_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei8_v_f32m4_m(vbool8_t mask, float *base, @@ -2231,7 +2231,7 @@ vfloat32m4_t test_vamoswapei8_v_f32m4_m(vbool8_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vamoswapei8_v_f32m8_m(vbool4_t mask, float *base, @@ -2244,7 +2244,7 @@ vfloat32m8_t test_vamoswapei8_v_f32m8_m(vbool4_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vamoswapei16_v_f32mf2_m(vbool64_t mask, float *base, @@ -2257,7 +2257,7 @@ vfloat32mf2_t test_vamoswapei16_v_f32mf2_m(vbool64_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vamoswapei16_v_f32m1_m(vbool32_t mask, float *base, @@ -2270,7 +2270,7 @@ vfloat32m1_t test_vamoswapei16_v_f32m1_m(vbool32_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei16_v_f32m2_m(vbool16_t mask, float *base, @@ -2283,7 +2283,7 @@ vfloat32m2_t test_vamoswapei16_v_f32m2_m(vbool16_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei16_v_f32m4_m(vbool8_t mask, float *base, @@ -2296,7 +2296,7 @@ vfloat32m4_t test_vamoswapei16_v_f32m4_m(vbool8_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vamoswapei16_v_f32m8_m(vbool4_t mask, float *base, @@ -2309,7 +2309,7 @@ vfloat32m8_t test_vamoswapei16_v_f32m8_m(vbool4_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vamoswapei32_v_f32mf2_m(vbool64_t mask, float *base, @@ -2322,7 +2322,7 @@ vfloat32mf2_t test_vamoswapei32_v_f32mf2_m(vbool64_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vamoswapei32_v_f32m1_m(vbool32_t mask, float *base, @@ -2335,7 +2335,7 @@ vfloat32m1_t test_vamoswapei32_v_f32m1_m(vbool32_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei32_v_f32m2_m(vbool16_t mask, float *base, @@ -2348,7 +2348,7 @@ vfloat32m2_t test_vamoswapei32_v_f32m2_m(vbool16_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei32_v_f32m4_m(vbool8_t mask, float *base, @@ -2361,7 +2361,7 @@ vfloat32m4_t test_vamoswapei32_v_f32m4_m(vbool8_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vamoswapei32_v_f32m8_m(vbool4_t mask, float *base, @@ -2374,7 +2374,7 @@ vfloat32m8_t test_vamoswapei32_v_f32m8_m(vbool4_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vamoswapei64_v_f32mf2_m(vbool64_t mask, float *base, @@ -2387,7 +2387,7 @@ vfloat32mf2_t test_vamoswapei64_v_f32mf2_m(vbool64_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vamoswapei64_v_f32m1_m(vbool32_t mask, float *base, @@ -2400,7 +2400,7 @@ 
vfloat32m1_t test_vamoswapei64_v_f32m1_m(vbool32_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei64_v_f32m2_m(vbool16_t mask, float *base, @@ -2413,7 +2413,7 @@ vfloat32m2_t test_vamoswapei64_v_f32m2_m(vbool16_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei64_v_f32m4_m(vbool8_t mask, float *base, @@ -2426,7 +2426,7 @@ vfloat32m4_t test_vamoswapei64_v_f32m4_m(vbool8_t mask, float *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei8_v_f64m1_m(vbool64_t mask, double *base, @@ -2439,7 +2439,7 @@ vfloat64m1_t test_vamoswapei8_v_f64m1_m(vbool64_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei8_v_f64m2_m(vbool32_t mask, double *base, @@ -2452,7 +2452,7 @@ vfloat64m2_t test_vamoswapei8_v_f64m2_m(vbool32_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei8_v_f64m4_m(vbool16_t mask, double *base, @@ -2465,7 +2465,7 @@ vfloat64m4_t test_vamoswapei8_v_f64m4_m(vbool16_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei8_v_f64m8_m(vbool8_t mask, double *base, @@ -2478,7 +2478,7 @@ vfloat64m8_t test_vamoswapei8_v_f64m8_m(vbool8_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei16_v_f64m1_m(vbool64_t mask, double *base, @@ -2491,7 +2491,7 @@ vfloat64m1_t test_vamoswapei16_v_f64m1_m(vbool64_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei16_v_f64m2_m(vbool32_t mask, double *base, @@ -2504,7 +2504,7 @@ vfloat64m2_t test_vamoswapei16_v_f64m2_m(vbool32_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei16_v_f64m4_m(vbool16_t mask, double *base, @@ -2517,7 +2517,7 @@ vfloat64m4_t test_vamoswapei16_v_f64m4_m(vbool16_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei16_v_f64m8_m(vbool8_t mask, double *base, @@ -2530,7 +2530,7 @@ vfloat64m8_t test_vamoswapei16_v_f64m8_m(vbool8_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei32_v_f64m1_m(vbool64_t mask, double *base, @@ -2543,7 +2543,7 @@ vfloat64m1_t test_vamoswapei32_v_f64m1_m(vbool64_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei32_v_f64m2_m(vbool32_t mask, double *base, @@ -2556,7 +2556,7 @@ vfloat64m2_t test_vamoswapei32_v_f64m2_m(vbool32_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei32_v_f64m4_m(vbool16_t mask, double *base, @@ -2569,7 +2569,7 @@ vfloat64m4_t test_vamoswapei32_v_f64m4_m(vbool16_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei32_v_f64m8_m(vbool8_t mask, double *base, @@ -2582,7 +2582,7 @@ vfloat64m8_t test_vamoswapei32_v_f64m8_m(vbool8_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei64_v_f64m1_m(vbool64_t mask, double *base, @@ -2595,7 +2595,7 @@ vfloat64m1_t test_vamoswapei64_v_f64m1_m(vbool64_t mask, double *base, // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], 
 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vfloat64m2_t test_vamoswapei64_v_f64m2_m(vbool32_t mask, double *base,
@@ -2608,7 +2608,7 @@ vfloat64m2_t test_vamoswapei64_v_f64m2_m(vbool32_t mask, double *base,
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vfloat64m4_t test_vamoswapei64_v_f64m4_m(vbool16_t mask, double *base,
@@ -2621,7 +2621,7 @@ vfloat64m4_t test_vamoswapei64_v_f64m4_m(vbool16_t mask, double *base,
 // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vfloat64m8_t test_vamoswapei64_v_f64m8_m(vbool8_t mask, double *base,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
index b510bf5..26e405e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
@@ -778,7 +778,7 @@ vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint6
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
@@ -789,7 +789,7 @@ vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
@@ -800,7 +800,7 @@ vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
@@ -811,7 +811,7 @@ vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
@@ -822,7 +822,7 @@ vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t b
 // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
@@ -833,7 +833,7 @@ vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t b
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
@@ -844,7 +844,7 @@ vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16m
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
@@ -855,7 +855,7 @@ vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2
 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 //
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -866,7 +866,7 @@ vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -877,7 +877,7 @@ vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -888,7 +888,7 @@ vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -899,7 +899,7 @@ vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -910,7 +910,7 @@ vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, 
int32_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -921,7 +921,7 @@ vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -932,7 +932,7 @@ vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -943,7 +943,7 @@ vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -954,7 +954,7 @@ vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, 
vint32m1_t value, size_t vl) { @@ -965,7 +965,7 @@ vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -976,7 +976,7 @@ vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -987,7 +987,7 @@ vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -998,7 +998,7 @@ vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -1009,7 +1009,7 @@ vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1020,7 +1020,7 @@ vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1031,7 +1031,7 @@ vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t b // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1042,7 +1042,7 @@ vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1053,7 +1053,7 @@ vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2 // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1064,7 +1064,7 @@ vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_ // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1075,7 +1075,7 @@ vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1086,7 +1086,7 @@ vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2 // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1097,7 +1097,7 @@ vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_ // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1108,7 +1108,7 @@ vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_ // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1119,7 +1119,7 @@ vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1130,7 +1130,7 @@ vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_ // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1141,7 +1141,7 @@ vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_ // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1152,7 +1152,7 @@ vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_ // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1163,7 +1163,7 @@ vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1174,7 +1174,7 @@ vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8m // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1185,7 +1185,7 @@ vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1196,7 +1196,7 @@ vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1207,7 +1207,7 @@ vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1218,7 +1218,7 @@ vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1229,7 +1229,7 @@ vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint1 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1_m( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1240,7 +1240,7 @@ vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1251,7 +1251,7 @@ vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1262,7 +1262,7 @@ vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2 // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1273,7 +1273,7 @@ vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1284,7 +1284,7 @@ vuint32mf2_t 
test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint3 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1295,7 +1295,7 @@ vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1306,7 +1306,7 @@ vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1317,7 +1317,7 @@ vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1328,7 +1328,7 @@ vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t 
test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1339,7 +1339,7 @@ vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint6 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1350,7 +1350,7 @@ vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1361,7 +1361,7 @@ vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1372,7 +1372,7 @@ vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -1383,7 +1383,7 @@ vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -1394,7 +1394,7 @@ vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -1405,7 +1405,7 @@ vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2 // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -1416,7 +1416,7 @@ vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -1427,7 +1427,7 @@ vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -1438,7 +1438,7 @@ vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -1449,7 +1449,7 @@ vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1460,7 +1460,7 @@ vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2 // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1471,7 +1471,7 @@ vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1482,7 +1482,7 @@ vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1493,7 +1493,7 @@ vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1504,7 +1504,7 @@ vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4 // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1515,7 +1515,7 @@ vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -1526,7 +1526,7 @@ vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -1537,7 +1537,7 @@ vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoxorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c index 
c943e90..c67e269 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -887,7 +887,7 @@ vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -937,7 +937,7 @@ vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
 //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -947,7 +947,7 @@ vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -957,7 +957,7 @@ vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -967,7 +967,7 @@ vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -997,7 +997,7 @@ vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1007,7 +1007,7 @@ vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1017,7 +1017,7 @@ vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1027,7 +1027,7 @@ vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1037,7 +1037,7 @@ vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1047,7 +1047,7 @@ vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: 
@test_vand_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1057,7 +1057,7 @@ vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1087,7 +1087,7 @@ vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1097,7 +1097,7 @@ vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1107,7 +1107,7 @@ vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1127,7 +1127,7 @@ vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1137,7 +1137,7 @@ vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1147,7 +1147,7 @@ vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { 
@@ -1157,7 +1157,7 @@ vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1177,7 +1177,7 @@ vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1187,7 +1187,7 @@ vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1197,7 +1197,7 @@ vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1217,7 +1217,7 @@ vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1227,7 +1227,7 @@ vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1237,7 +1237,7 @@ vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1247,7 +1247,7 @@ vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
// vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1307,7 +1307,7 @@ vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( 
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1377,7 +1377,7 @@ vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1397,7 +1397,7 @@ vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1407,7 +1407,7 @@ vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1417,7 +1417,7 @@ vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1427,7 +1427,7 @@ vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1447,7 +1447,7 @@ vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1457,7 +1457,7 @@ vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1467,7 +1467,7 @@ vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1477,7 +1477,7 @@ vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1487,7 +1487,7 @@ vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1497,7 +1497,7 @@ vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1507,7 +1507,7 @@ vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1517,7 +1517,7 @@ vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1537,7 +1537,7 @@ vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1547,7 +1547,7 @@ vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1557,7 +1557,7 @@ vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1567,7 +1567,7 @@ vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1577,7 +1577,7 @@ vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1587,7 +1587,7 @@ vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1597,7 +1597,7 @@ vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1607,7 +1607,7 @@ vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1627,7 +1627,7 @@ vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1637,7 +1637,7 @@ vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1647,7 +1647,7 @@ vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1647,7 +1657,7 @@ vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1667,7 +1667,7 @@ vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1677,7 +1677,7 @@ vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1687,7 +1687,7 @@ vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1697,7 +1697,7 @@ vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1717,7 +1717,7 @@ vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -1727,7 +1727,7 @@ vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1737,7 +1737,7 @@ vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
 //
 // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
index 4eb6dc1..1695e2b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
@@ -890,7 +890,7 @@ vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -901,7 +901,7 @@ vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -912,7 +912,7 @@ vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -923,7 +923,7 @@ vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -934,7 +934,7 @@ vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -945,7 +945,7 @@ vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -956,7 +956,7 @@ vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -967,7 +967,7 @@ vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -978,7 +978,7 @@ vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -989,7 +989,7 @@ vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -1000,7 +1000,7 @@ vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -1011,7 +1011,7 @@ vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -1022,7 +1022,7 @@ vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -1033,7 +1033,7 @@ vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -1044,7 +1044,7 @@ vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1056,7 +1056,7 @@ vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1067,7 +1067,7 @@ vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1079,7 +1079,7 @@ vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1090,7 +1090,7 @@ vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1101,7 +1101,7 @@ vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1112,7 +1112,7 @@ vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1123,7 +1123,7 @@ vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1134,7 +1134,7 @@ vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1145,7 +1145,7 @@ vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1156,7 +1156,7 @@ vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1167,7 +1167,7 @@ vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1178,7 +1178,7 @@ vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1190,7 +1190,7 @@ vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1201,7 +1201,7 @@ vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -1212,7 +1212,7 @@ vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -1223,7 +1223,7 @@ vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -1234,7 +1234,7 @@ vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -1245,7 +1245,7 @@ vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -1256,7 +1256,7 @@ vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -1267,7 +1267,7 @@ vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -1278,7 +1278,7 @@ vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -1289,7 +1289,7 @@ vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1300,7 +1300,7 @@ vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1311,7 +1311,7 @@ vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1322,7 +1322,7 @@ vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1333,7 +1333,7 @@ vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1344,7 +1344,7 @@ vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1355,7 +1355,7 @@ vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1366,7 +1366,7 @@ vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1377,7 +1377,7 @@ vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -1389,7 +1389,7 @@ vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -1400,7 +1400,7 @@ vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -1412,7 +1412,7 @@ vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -1423,7 +1423,7 @@ vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -1435,7 +1435,7 @@ vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -1446,7 +1446,7 @@ vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -1457,7 +1457,7 @@ vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -1468,7 +1468,7 @@ vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -1479,7 +1479,7 @@ vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -1490,7 +1490,7 @@ vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -1501,7 +1501,7 @@ vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -1512,7 +1512,7 @@ vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -1523,7 +1523,7 @@ vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -1534,7 +1534,7 @@ vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1546,7 +1546,7 @@ vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1558,7 +1558,7 @@ vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1570,7 +1570,7 @@ vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1582,7 +1582,7 @@ vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1594,7 +1594,7 @@ vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1605,7 +1605,7 @@ vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1617,7 +1617,7 @@ vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1628,7 +1628,7 @@ vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1640,7 +1640,7 @@ vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1651,7 
+1651,7 @@ vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1663,7 +1663,7 @@ vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1674,7 +1674,7 @@ vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1686,7 +1686,7 @@ vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1698,7 +1698,7 @@ vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1710,7 +1710,7 @@ vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1721,7 +1721,7 @@ vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1733,7 +1733,7 @@ vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1744,7 +1744,7 @@ vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1756,7 +1756,7 @@ vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1767,7 +1767,7 @@ vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1779,7 +1779,7 @@ vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1790,7 +1790,7 @@ vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1802,7 +1802,7 @@ vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1813,7 +1813,7 @@ vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1825,7 +1825,7 @@ vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1836,7 +1836,7 @@ vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1848,7 +1848,7 @@ vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1859,7 +1859,7 @@ vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -1871,7 +1871,7 @@ vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c index 049e69f..cd2c0b0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcompress.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vcompress_vm_i8mf8 (vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) { @@ -16,7 +16,7 @@ vint8mf8_t test_vcompress_vm_i8mf8 (vbool64_t mask, vint8mf8_t dest, vint8mf8_t // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vcompress_vm_i8mf4 (vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) { @@ -25,7 +25,7 @@ vint8mf4_t test_vcompress_vm_i8mf4 (vbool32_t mask, vint8mf4_t dest, vint8mf4_t // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vcompress_vm_i8mf2 (vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) { @@ -34,7 +34,7 @@ vint8mf2_t test_vcompress_vm_i8mf2 (vbool16_t mask, vint8mf2_t dest, vint8mf2_t // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vcompress_vm_i8m1 (vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) { @@ -43,7 +43,7 @@ vint8m1_t test_vcompress_vm_i8m1 (vbool8_t mask, vint8m1_t dest, vint8m1_t src, // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vcompress_vm_i8m2 (vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) { @@ -52,7 +52,7 @@ vint8m2_t test_vcompress_vm_i8m2 (vbool4_t mask, vint8m2_t dest, vint8m2_t src, // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vcompress_vm_i8m4 (vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) { @@ -61,7 +61,7 @@ vint8m4_t test_vcompress_vm_i8m4 (vbool2_t mask, vint8m4_t dest, vint8m4_t src, // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vcompress_vm_i8m8 (vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) { @@ -70,7 +70,7 @@ vint8m8_t test_vcompress_vm_i8m8 (vbool1_t mask, vint8m8_t dest, vint8m8_t src, // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vcompress_vm_i16mf4 (vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) { @@ -79,7 +79,7 @@ vint16mf4_t test_vcompress_vm_i16mf4 (vbool64_t mask, vint16mf4_t dest, vint16mf // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vcompress_vm_i16mf2 (vbool32_t mask, vint16mf2_t dest, vint16mf2_t 
src, size_t vl) { @@ -88,7 +88,7 @@ vint16mf2_t test_vcompress_vm_i16mf2 (vbool32_t mask, vint16mf2_t dest, vint16mf // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vcompress_vm_i16m1 (vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) { @@ -97,7 +97,7 @@ vint16m1_t test_vcompress_vm_i16m1 (vbool16_t mask, vint16m1_t dest, vint16m1_t // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vcompress_vm_i16m2 (vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) { @@ -106,7 +106,7 @@ vint16m2_t test_vcompress_vm_i16m2 (vbool8_t mask, vint16m2_t dest, vint16m2_t s // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vcompress_vm_i16m4 (vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) { @@ -115,7 +115,7 @@ vint16m4_t test_vcompress_vm_i16m4 (vbool4_t mask, vint16m4_t dest, vint16m4_t s // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vcompress_vm_i16m8 (vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) { @@ -124,7 +124,7 @@ vint16m8_t test_vcompress_vm_i16m8 (vbool2_t mask, vint16m8_t dest, vint16m8_t s // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vcompress_vm_i32mf2 (vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) { @@ -133,7 +133,7 @@ vint32mf2_t test_vcompress_vm_i32mf2 (vbool64_t mask, vint32mf2_t dest, vint32mf // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) { @@ -142,7 +142,7 @@ 
vint32m1_t test_vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t dest, vint32m1_t // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vcompress_vm_i32m2 (vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) { @@ -151,7 +151,7 @@ vint32m2_t test_vcompress_vm_i32m2 (vbool16_t mask, vint32m2_t dest, vint32m2_t // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vcompress_vm_i32m4 (vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) { @@ -160,7 +160,7 @@ vint32m4_t test_vcompress_vm_i32m4 (vbool8_t mask, vint32m4_t dest, vint32m4_t s // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vcompress_vm_i32m8 (vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) { @@ -169,7 +169,7 @@ vint32m8_t test_vcompress_vm_i32m8 (vbool4_t mask, vint32m8_t dest, vint32m8_t s // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vcompress_vm_i64m1 (vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) { @@ -178,7 +178,7 @@ vint64m1_t test_vcompress_vm_i64m1 (vbool64_t mask, vint64m1_t dest, vint64m1_t // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vcompress_vm_i64m2 (vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) { @@ -187,7 +187,7 @@ vint64m2_t test_vcompress_vm_i64m2 (vbool32_t mask, vint64m2_t dest, vint64m2_t // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vcompress_vm_i64m4 (vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) { @@ -196,7 +196,7 @@ vint64m4_t test_vcompress_vm_i64m4 
(vbool16_t mask, vint64m4_t dest, vint64m4_t // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vcompress_vm_i64m8 (vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) { @@ -205,7 +205,7 @@ vint64m8_t test_vcompress_vm_i64m8 (vbool8_t mask, vint64m8_t dest, vint64m8_t s // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vcompress_vm_u8mf8 (vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) { @@ -214,7 +214,7 @@ vuint8mf8_t test_vcompress_vm_u8mf8 (vbool64_t mask, vuint8mf8_t dest, vuint8mf8 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vcompress_vm_u8mf4 (vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) { @@ -223,7 +223,7 @@ vuint8mf4_t test_vcompress_vm_u8mf4 (vbool32_t mask, vuint8mf4_t dest, vuint8mf4 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vcompress_vm_u8mf2 (vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) { @@ -232,7 +232,7 @@ vuint8mf2_t test_vcompress_vm_u8mf2 (vbool16_t mask, vuint8mf2_t dest, vuint8mf2 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vcompress_vm_u8m1 (vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) { @@ -241,7 +241,7 @@ vuint8m1_t test_vcompress_vm_u8m1 (vbool8_t mask, vuint8m1_t dest, vuint8m1_t sr // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vcompress_vm_u8m2 (vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) { @@ -250,7 +250,7 @@ vuint8m2_t test_vcompress_vm_u8m2 (vbool4_t mask, vuint8m2_t dest, vuint8m2_t sr 
// CHECK-RV64-LABEL: @test_vcompress_vm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vcompress_vm_u8m4 (vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) { @@ -259,7 +259,7 @@ vuint8m4_t test_vcompress_vm_u8m4 (vbool2_t mask, vuint8m4_t dest, vuint8m4_t sr // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv64i8.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vcompress_vm_u8m8 (vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) { @@ -268,7 +268,7 @@ vuint8m8_t test_vcompress_vm_u8m8 (vbool1_t mask, vuint8m8_t dest, vuint8m8_t sr // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vcompress_vm_u16mf4 (vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) { @@ -277,7 +277,7 @@ vuint16mf4_t test_vcompress_vm_u16mf4 (vbool64_t mask, vuint16mf4_t dest, vuint1 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vcompress_vm_u16mf2 (vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) { @@ -286,7 +286,7 @@ vuint16mf2_t test_vcompress_vm_u16mf2 (vbool32_t mask, vuint16mf2_t dest, vuint1 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vcompress_vm_u16m1 (vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) { @@ -295,7 +295,7 @@ vuint16m1_t test_vcompress_vm_u16m1 (vbool16_t mask, vuint16m1_t dest, vuint16m1 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vcompress_vm_u16m2 (vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) { @@ -304,7 +304,7 @@ vuint16m2_t test_vcompress_vm_u16m2 (vbool8_t mask, vuint16m2_t dest, vuint16m2_ // CHECK-RV64-LABEL: 
@test_vcompress_vm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vcompress_vm_u16m4 (vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) { @@ -313,7 +313,7 @@ vuint16m4_t test_vcompress_vm_u16m4 (vbool4_t mask, vuint16m4_t dest, vuint16m4_ // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32i16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vcompress_vm_u16m8 (vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) { @@ -322,7 +322,7 @@ vuint16m8_t test_vcompress_vm_u16m8 (vbool2_t mask, vuint16m8_t dest, vuint16m8_ // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vcompress_vm_u32mf2 (vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) { @@ -331,7 +331,7 @@ vuint32mf2_t test_vcompress_vm_u32mf2 (vbool64_t mask, vuint32mf2_t dest, vuint3 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vcompress_vm_u32m1 (vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) { @@ -340,7 +340,7 @@ vuint32m1_t test_vcompress_vm_u32m1 (vbool32_t mask, vuint32m1_t dest, vuint32m1 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vcompress_vm_u32m2 (vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) { @@ -349,7 +349,7 @@ vuint32m2_t test_vcompress_vm_u32m2 (vbool16_t mask, vuint32m2_t dest, vuint32m2 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vcompress_vm_u32m4 (vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) { @@ -358,7 +358,7 @@ vuint32m4_t test_vcompress_vm_u32m4 (vbool8_t mask, vuint32m4_t dest, vuint32m4_ // CHECK-RV64-LABEL: 
@test_vcompress_vm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16i32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vcompress_vm_u32m8 (vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) { @@ -367,7 +367,7 @@ vuint32m8_t test_vcompress_vm_u32m8 (vbool4_t mask, vuint32m8_t dest, vuint32m8_ // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vcompress_vm_u64m1 (vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) { @@ -376,7 +376,7 @@ vuint64m1_t test_vcompress_vm_u64m1 (vbool64_t mask, vuint64m1_t dest, vuint64m1 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vcompress_vm_u64m2 (vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) { @@ -385,7 +385,7 @@ vuint64m2_t test_vcompress_vm_u64m2 (vbool32_t mask, vuint64m2_t dest, vuint64m2 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vcompress_vm_u64m4 (vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) { @@ -394,7 +394,7 @@ vuint64m4_t test_vcompress_vm_u64m4 (vbool16_t mask, vuint64m4_t dest, vuint64m4 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8i64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) { @@ -403,7 +403,7 @@ vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_ // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vcompress_vm_f32mf2 (vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) { @@ -412,7 +412,7 @@ vfloat32mf2_t test_vcompress_vm_f32mf2 (vbool64_t mask, vfloat32mf2_t dest, vflo // CHECK-RV64-LABEL: 
@test_vcompress_vm_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vcompress_vm_f32m1 (vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) { @@ -421,7 +421,7 @@ vfloat32m1_t test_vcompress_vm_f32m1 (vbool32_t mask, vfloat32m1_t dest, vfloat3 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vcompress_vm_f32m2 (vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) { @@ -430,7 +430,7 @@ vfloat32m2_t test_vcompress_vm_f32m2 (vbool16_t mask, vfloat32m2_t dest, vfloat3 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vcompress_vm_f32m4 (vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) { @@ -439,7 +439,7 @@ vfloat32m4_t test_vcompress_vm_f32m4 (vbool8_t mask, vfloat32m4_t dest, vfloat32 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vcompress_vm_f32m8 (vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) { @@ -448,7 +448,7 @@ vfloat32m8_t test_vcompress_vm_f32m8 (vbool4_t mask, vfloat32m8_t dest, vfloat32 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vcompress_vm_f64m1 (vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) { @@ -457,7 +457,7 @@ vfloat64m1_t test_vcompress_vm_f64m1 (vbool64_t mask, vfloat64m1_t dest, vfloat6 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vcompress_vm_f64m2 (vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) { @@ -466,7 +466,7 @@ vfloat64m2_t test_vcompress_vm_f64m2 (vbool32_t mask, vfloat64m2_t dest, vfloat6 // CHECK-RV64-LABEL: 
@test_vcompress_vm_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) { @@ -475,7 +475,7 @@ vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat6 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f64.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c index edd61e2..8788f07 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c @@ -887,7 +887,7 @@ vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -897,7 +897,7 @@ vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -907,7 +907,7 @@ vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -917,7 +917,7 @@ vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -927,7 +927,7 @@ vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -937,7 +937,7 @@ vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -947,7 +947,7 @@ vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -957,7 +957,7 @@ vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -967,7 +967,7 @@ vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, 
vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -997,7 +997,7 @@ vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1007,7 +1007,7 @@ vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1017,7 +1017,7 @@ vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1027,7 +1027,7 @@ vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1037,7 +1037,7 @@ vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1047,7 +1047,7 @@ vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1057,7 +1057,7 @@ vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1087,7 +1087,7 @@ vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1097,7 +1097,7 @@ vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1107,7 +1107,7 @@ vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1127,7 +1127,7 @@ vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1137,7 +1137,7 @@ vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: 
@test_vdiv_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1147,7 +1147,7 @@ vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1157,7 +1157,7 @@ vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1177,7 +1177,7 @@ vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1187,7 +1187,7 @@ vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1197,7 +1197,7 @@ vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1217,7 +1217,7 @@ vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1227,7 +1227,7 @@ vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1237,7 +1237,7 @@ vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1247,7 
+1247,7 @@ vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1307,7 +1307,7 @@ vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1377,7 +1377,7 @@ vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1387,7 +1387,7 @@ vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1397,7 +1397,7 @@ vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1407,7 +1407,7 @@ vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1417,7 +1417,7 @@ vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1427,7 +1427,7 @@ vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1447,7 +1447,7 @@ vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1457,7 +1457,7 @@ vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1467,7 +1467,7 @@ vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1477,7 +1477,7 @@ vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1487,7 +1487,7 @@ vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1497,7 +1497,7 @@ vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ 
-1517,7 +1517,7 @@ vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16 // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1537,7 +1537,7 @@ vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1547,7 +1547,7 @@ vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1557,7 +1557,7 @@ vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1567,7 +1567,7 @@ vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1577,7 +1577,7 @@ vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1587,7 +1587,7 @@ vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1597,7 +1597,7 @@ vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1607,7 +1607,7 @@ vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32 // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32 // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ 
-1677,7 +1677,7 @@ vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1717,7 +1717,7 @@ vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1727,7 +1727,7 @@ vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1737,7 +1737,7 @@ vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1747,7 +1747,7 @@ vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1757,7 +1757,7 @@ vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c index 431c1fa..86a7e7049 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c @@ -98,7 +98,7 @@ vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { @@ -108,7 +108,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vf // // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { @@ -118,7 +118,7 @@ vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { @@ -128,7 +128,7 @@ vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { @@ -138,7 +138,7 @@ vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { @@ -148,7 +148,7 @@ vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { @@ -158,7 +158,7 @@ vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { @@ -168,7 +168,7 @@ vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { @@ -178,7 +178,7 @@ vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c index b9efbd7..40129bb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c @@ -188,7 +188,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -198,7 +198,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vf // // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -208,7 +208,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vf // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
index b9efbd7..40129bb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vf
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -208,7 +208,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vf
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -218,7 +218,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -228,7 +228,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -238,7 +238,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -248,7 +248,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -258,7 +258,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -268,7 +268,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -278,7 +278,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -288,7 +288,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -298,7 +298,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -308,7 +308,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -318,7 +318,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -328,7 +328,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -338,7 +338,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -348,7 +348,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -358,7 +358,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat
 //
 // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
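The vfadd tests above exercise both the vector-vector (vv) and vector-scalar (vf) masked overloads; the scalar variant takes a float or double op2 and selects the f32/f64-mangled intrinsic. A sketch under the same assumptions as the example above:

  vfloat32m1_t add_vv_m(vbool32_t mask, vfloat32m1_t maskedoff,
                        vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
    return vfadd(mask, maskedoff, op1, op2, vl); // vv form
  }
  vfloat32m1_t add_vf_m(vbool32_t mask, vfloat32m1_t maskedoff,
                        vfloat32m1_t op1, float op2, size_t vl) {
    return vfadd(mask, maskedoff, op1, op2, vl); // vf form, scalar op2
  }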
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
index 927a459..131182f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
@@ -98,7 +98,7 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -109,7 +109,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -120,7 +120,7 @@ vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -131,7 +131,7 @@ vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -142,7 +142,7 @@ vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -153,7 +153,7 @@ vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -164,7 +164,7 @@ vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -175,7 +175,7 @@ vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -186,7 +186,7 @@ vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
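vfclass is the unary case: one source vector, and a result vector whose element type differs (an unsigned vector of classification bits). A hypothetical caller, with the same caveats as the earlier sketches:

  vuint32m1_t classify_m(vbool32_t mask, vuint32m1_t maskedoff,
                         vfloat32m1_t op1, size_t vl) {
    return vfclass(mask, maskedoff, op1, vl);
  }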
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
index 14f9dee..bd50cad 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
@@ -548,7 +548,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -559,7 +559,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -570,7 +570,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -581,7 +581,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -592,7 +592,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -603,7 +603,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -614,7 +614,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -625,7 +625,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -636,7 +636,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -647,7 +647,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -658,7 +658,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -669,7 +669,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask,
@@ -681,7 +681,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -692,7 +692,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -703,7 +703,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -714,7 +714,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -725,7 +725,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -736,7 +736,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -747,7 +747,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -758,7 +758,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -769,7 +769,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -780,7 +780,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -791,7 +791,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -802,7 +802,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -813,7 +813,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -824,7 +824,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask,
@@ -836,7 +836,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -847,7 +847,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -858,7 +858,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -869,7 +869,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -880,7 +880,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -891,7 +891,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -902,7 +902,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -913,7 +913,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -924,7 +924,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -935,7 +935,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -946,7 +946,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -957,7 +957,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -968,7 +968,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -979,7 +979,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -990,7 +990,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -1001,7 +1001,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -1012,7 +1012,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -1023,7 +1023,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -1034,7 +1034,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -1045,7 +1045,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -1056,7 +1056,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -1067,7 +1067,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -1078,7 +1078,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -1089,7 +1089,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -1100,7 +1100,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -1111,7 +1111,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -1122,7 +1122,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -1133,7 +1133,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
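The conversions follow the same masked operand order; only the overloaded spelling differs, since the operation kind (x, xu, f, and the rtz variants) stays in the name while the type suffix is dropped. The spelling below is assumed from that scheme:

  vint32m1_t to_int_m(vbool32_t mask, vint32m1_t maskedoff,
                      vfloat32m1_t src, size_t vl) {
    return vfcvt_x(mask, maskedoff, src, vl); // rounds per the dynamic mode
  }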
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
index 4a2b144..5fdf00e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
@@ -197,7 +197,7 @@ vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -220,7 +220,7 @@ vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -232,7 +232,7 @@ vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -243,7 +243,7 @@ vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -255,7 +255,7 @@ vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -266,7 +266,7 @@ vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -278,7 +278,7 @@ vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -289,7 +289,7 @@ vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -301,7 +301,7 @@ vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -312,7 +312,7 @@ vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -324,7 +324,7 @@ vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -335,7 +335,7 @@ vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -347,7 +347,7 @@ vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -358,7 +358,7 @@ vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -370,7 +370,7 @@ vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -381,7 +381,7 @@ vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -393,7 +393,7 @@ vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
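vfdiv mirrors vfadd, including the vf variant with a scalar divisor; for the f64 types the scalar is a double. A sketch, under the same assumptions as the earlier examples:

  vfloat64m1_t div_vf_m(vbool64_t mask, vfloat64m1_t maskedoff,
                        vfloat64m1_t op1, double op2, size_t vl) {
    return vfdiv(mask, maskedoff, op1, op2, vl);
  }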
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
index 5005d4b..70d2264 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
@@ -63,7 +63,7 @@ long test_vfirst_m_b64(vbool64_t op1, size_t vl) { return vfirst(op1, vl); }
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
@@ -73,7 +73,7 @@ long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
@@ -83,7 +83,7 @@ long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
@@ -93,7 +93,7 @@ long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
@@ -103,7 +103,7 @@ long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
@@ -113,7 +113,7 @@ long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
@@ -123,7 +123,7 @@ long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
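vfirst is a mask operation returning a scalar index rather than a vector, so there is no maskedoff operand; the unmasked body is visible verbatim in the first hunk header above, and the masked form is assumed to prepend only the mask:

  long first_set_m(vbool8_t mask, vbool8_t op1, size_t vl) {
    return vfirst(mask, op1, vl); // -1 if no element of op1 is set
  }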
test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vfirst_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c index de64c4c..54174fa 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c @@ -206,7 +206,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -218,7 +218,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -229,7 +229,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -241,7 +241,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -252,7 +252,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -264,7 +264,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -275,7 +275,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -287,7 +287,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -298,7 +298,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -310,7 +310,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -321,7 +321,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -333,7 +333,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -344,7 +344,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -356,7 +356,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -367,7 +367,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -379,7 +379,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -390,7 +390,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -402,7 +402,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c index 43f636d..fda3ee2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c @@ -206,7 +206,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -218,7 +218,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -229,7 +229,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -241,7 +241,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -252,7 +252,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -264,7 +264,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -275,7 +275,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -287,7 +287,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -298,7 +298,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -310,7 +310,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -321,7 +321,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -333,7 +333,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -344,7 +344,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -356,7 +356,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -367,7 +367,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -379,7 +379,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -390,7 +390,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -402,7 +402,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, // // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c index 50ea952..c5c516a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c @@ -197,7 +197,7 @@ vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -209,7 +209,7 @@ vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -220,7 +220,7 @@ vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -232,7 +232,7 @@ vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -243,7 +243,7 @@ vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -255,7 +255,7 @@ vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -266,7 +266,7 @@ vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -278,7 +278,7 @@ vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -289,7 +289,7 @@ vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -301,7 +301,7 @@ vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -312,7 +312,7 @@ vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -324,7 +324,7 @@ vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -335,7 +335,7 @@ vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -347,7 +347,7 @@ vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -358,7 +358,7 @@ vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -370,7 +370,7 @@ vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -381,7 +381,7 @@ vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -393,7 +393,7 @@ vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c index cfeb02c..09bce7c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c @@ -8,7 +8,7 @@ // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, @@ -19,7 +19,7 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, @@ -30,7 +30,7 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, @@ -41,7 +41,7 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, @@ -52,7 +52,7 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, @@ -63,7 +63,7 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, @@ -74,7 +74,7 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, @@ -85,7 +85,7 @@ vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, @@ -96,7 +96,7 @@ vfloat64m4_t 
test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, // // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c index ce48003..46ceaca 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c @@ -197,7 +197,7 @@ vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -209,7 +209,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -220,7 +220,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -232,7 +232,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -243,7 +243,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -255,7 +255,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -266,7 +266,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -278,7 +278,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -289,7 +289,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -301,7 +301,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -312,7 +312,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t 
maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -324,7 +324,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -335,7 +335,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -347,7 +347,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -358,7 +358,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -370,7 +370,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t 
test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -381,7 +381,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -393,7 +393,7 @@ vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c index 6af54aa..604da9b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c @@ -206,7 +206,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1, // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -218,7 +218,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -229,7 +229,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -241,7 +241,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -252,7 +252,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -264,7 +264,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -275,7 +275,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -287,7 +287,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -298,7 +298,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -310,7 +310,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, 
// // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -321,7 +321,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -333,7 +333,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -344,7 +344,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -356,7 +356,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -367,7 +367,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -379,7 +379,7 @@ vfloat64m4_t 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
index 6e01064..913fe65 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
@@ -206,7 +206,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -218,7 +218,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -229,7 +229,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -241,7 +241,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -252,7 +252,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -264,7 +264,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -275,7 +275,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -287,7 +287,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -298,7 +298,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -310,7 +310,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -321,7 +321,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -333,7 +333,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -344,7 +344,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -356,7 +356,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -367,7 +367,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -379,7 +379,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -390,7 +390,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -402,7 +402,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
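// Illustrative sketch, not part of the patch: non-FMA ops such as vfmul carry
// a separate maskedoff source for inactive elements instead of reusing an
// accumulator, and in the _vf form the scalar is the second operand. Based on
// the test_vfmul_* signatures below; demo_vfmul is a hypothetical name:
//
//   #include <riscv_vector.h>
//
//   vfloat32m1_t demo_vfmul(vbool32_t mask, vfloat32m1_t maskedoff,
//                           vfloat32m1_t op1, float op2, size_t vl) {
//     // mask, maskedoff, op1, op2, vl -- the order the builtin now shares.
//     return vfmul(mask, maskedoff, op1, op2, vl);
//   }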
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
index 98c1130..4b1d3a8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
@@ -197,7 +197,7 @@ vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -220,7 +220,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -232,7 +232,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -243,7 +243,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -255,7 +255,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -266,7 +266,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -278,7 +278,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -289,7 +289,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -301,7 +301,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -312,7 +312,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -324,7 +324,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -335,7 +335,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -347,7 +347,7 @@ vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -358,7 +358,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -370,7 +370,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -381,7 +381,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -393,7 +393,7 @@ vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
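// Illustrative sketch, not part of the patch: the masked narrowing converts
// follow the same C order (mask, maskedoff, source, vl), with the _rtz_
// variants selecting round-towards-zero. Mirrors test_vfncvt_x_f_w_i16mf4_m
// below; the overloaded spelling vfncvt_x and the name demo_narrow are
// assumptions:
//
//   #include <riscv_vector.h>
//
//   vint16mf4_t demo_narrow(vbool64_t mask, vint16mf4_t maskedoff,
//                           vfloat32mf2_t src, size_t vl) {
//     // Narrow f32 elements to signed i16 under the mask.
//     return vfncvt_x(mask, maskedoff, src, vl);
//   }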
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
index 0223b34..ef9f3fe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
@@ -528,7 +528,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -539,7 +539,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask,
@@ -551,7 +551,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -562,7 +562,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask,
@@ -574,7 +574,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -585,7 +585,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -596,7 +596,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -607,7 +607,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -618,7 +618,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -629,7 +629,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -640,7 +640,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -651,7 +651,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask,
@@ -663,7 +663,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -674,7 +674,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask,
@@ -686,7 +686,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -697,7 +697,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,
@@ -709,7 +709,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -720,7 +720,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -731,7 +731,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -742,7 +742,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -753,7 +753,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -764,7 +764,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,
@@ -776,7 +776,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -787,7 +787,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -798,7 +798,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -809,7 +809,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -820,7 +820,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -831,7 +831,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -842,7 +842,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -853,7 +853,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,
@@ -865,7 +865,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -876,7 +876,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask,
@@ -888,7 +888,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -899,7 +899,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask,
@@ -911,7 +911,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -922,7 +922,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -933,7 +933,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask,
@@ -945,7 +945,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -956,7 +956,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -967,7 +967,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -978,7 +978,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask,
@@ -990,7 +990,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -1001,7 +1001,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -1012,7 +1012,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -1023,7 +1023,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask,
@@ -1035,7 +1035,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask,
@@ -1047,7 +1047,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -1058,7 +1058,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask,
@@ -1070,7 +1070,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -1081,7 +1081,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask,
@@ -1093,7 +1093,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -1104,7 +1104,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
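// Illustrative note, not part of the patch: vfneg is a pseudo-intrinsic with
// no IR intrinsic of its own; as the CHECK lines below show, it lowers to
// vfsgnjn with op1 supplied as both the value and the sign source. A hedged
// equivalent of test_vfneg_v_f32mf2_m; demo_vfneg is a hypothetical name:
//
//   vfloat32mf2_t demo_vfneg(vbool64_t mask, vfloat32mf2_t maskedoff,
//                            vfloat32mf2_t op1, size_t vl) {
//     // Expected to emit @llvm.riscv.vfsgnjn.mask...(maskedoff, op1, op1, ...)
//     return vfneg(mask, maskedoff, op1, vl);
//   }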
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
index 8ac2d95..cbab1f6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
@@ -98,7 +98,7 @@ vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -108,7 +108,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vf
 //
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -118,7 +118,7 @@ vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -128,7 +128,7 @@ vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloa
 //
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -138,7 +138,7 @@ vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat
 //
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -148,7 +148,7 @@ vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat
 //
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
vfloat64m1_t op1, size_t vl) { @@ -158,7 +158,7 @@ vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { @@ -168,7 +168,7 @@ vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { @@ -178,7 +178,7 @@ vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c index 6c6aed7..cebd237 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c @@ -206,7 +206,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -218,7 +218,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -230,7 +230,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, 
vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -242,7 +242,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -253,7 +253,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -265,7 +265,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -276,7 +276,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -288,7 +288,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -299,7 +299,7 
@@ vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -311,7 +311,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -322,7 +322,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -334,7 +334,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -345,7 +345,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -357,7 +357,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -368,7 +368,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -380,7 +380,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -391,7 +391,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -403,7 +403,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c index 1ea9a02..56feb08 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c @@ -206,7 +206,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -218,7 +218,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -230,7 +230,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -242,7 +242,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -253,7 +253,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -265,7 +265,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -276,7 +276,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -288,7 +288,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, // // CHECK-RV64-LABEL: 
@test_vfnmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -299,7 +299,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -311,7 +311,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -322,7 +322,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -334,7 +334,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -345,7 +345,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -357,7 +357,7 @@ 
vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -368,7 +368,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -380,7 +380,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -391,7 +391,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -403,7 +403,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c index c930ac2..3bcdfda 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c @@ -206,7 +206,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1, // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -218,7 +218,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -230,7 +230,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -242,7 +242,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -253,7 +253,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -265,7 +265,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -276,7 +276,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -288,7 +288,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -299,7 +299,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -311,7 +311,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -322,7 +322,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -334,7 +334,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -345,7 +345,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // 
CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -357,7 +357,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -368,7 +368,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -380,7 +380,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -391,7 +391,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -403,7 +403,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c index 7a43eddc..ba316ca0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c @@ -206,7 +206,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -218,7 +218,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -230,7 +230,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -242,7 +242,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -253,7 +253,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -265,7 +265,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -276,7 +276,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -288,7 +288,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -299,7 +299,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -311,7 +311,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -322,7 +322,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -334,7 +334,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: 
@test_vfnmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -345,7 +345,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -357,7 +357,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -368,7 +368,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -380,7 +380,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -391,7 +391,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -403,7 +403,7 @@ vfloat64m8_t 
test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c index 59849b5..db1e672 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c @@ -98,7 +98,7 @@ vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -109,7 +109,7 @@ vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -120,7 +120,7 @@ vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -131,7 +131,7 @@ vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -142,7 +142,7 @@ vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -153,7 +153,7 @@ vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -164,7 +164,7 @@ vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -175,7 +175,7 @@ vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -186,7 +186,7 @@ vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c index d6dcd73..f044b2e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c @@ -98,7 +98,7 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -109,7 +109,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -120,7 +120,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -131,7 +131,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -142,7 +142,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -153,7 +153,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -164,7 +164,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -175,7 +175,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -186,7 +186,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
index a96977c..0d74456 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
@@ -120,7 +120,7 @@ vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -132,7 +132,7 @@ vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
@@ -144,7 +144,7 @@ vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
@@ -156,7 +156,7 @@ vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
@@ -168,7 +168,7 @@ vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -180,7 +180,7 @@ vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
@@ -192,7 +192,7 @@ vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
@@ -204,7 +204,7 @@ vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
index 0947610..b3467db 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
@@ -120,7 +120,7 @@ vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -132,7 +132,7 @@ vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
@@ -144,7 +144,7 @@ vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
@@ -156,7 +156,7 @@ vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
@@ -168,7 +168,7 @@ vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -180,7 +180,7 @@ vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
@@ -192,7 +192,7 @@ vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
@@ -204,7 +204,7 @@ vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
index d280047..8c82c78 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
@@ -120,7 +120,7 @@ vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -132,7 +132,7 @@ vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
@@ -144,7 +144,7 @@ vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
@@ -156,7 +156,7 @@ vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
@@ -168,7 +168,7 @@ vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -180,7 +180,7 @@ vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
@@ -192,7 +192,7 @@ vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
@@ -204,7 +204,7 @@ vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
@@ -324,7 +324,7 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
@@ -336,7 +336,7 @@ vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -348,7 +348,7 @@ vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
@@ -360,7 +360,7 @@ vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
@@ -372,7 +372,7 @@ vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
@@ -384,7 +384,7 @@ vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -396,7 +396,7 @@ vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
@@ -408,7 +408,7 @@ vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
@@ -420,7 +420,7 @@ vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
index c712f40..b20cd8c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
@@ -98,7 +98,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -109,7 +109,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -120,7 +120,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -131,7 +131,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -142,7 +142,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -153,7 +153,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -164,7 +164,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -175,7 +175,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -186,7 +186,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
index 95a64ef..883ed13 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
@@ -98,7 +98,7 @@ vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -109,7 +109,7 @@ vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -120,7 +120,7 @@ vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -131,7 +131,7 @@ vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -142,7 +142,7 @@ vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -153,7 +153,7 @@ vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -164,7 +164,7 @@ vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -175,7 +175,7 @@ vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -186,7 +186,7 @@ vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
index 279940d..cb8bf8a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
@@ -575,7 +575,7 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -587,7 +587,7 @@ vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -598,7 +598,7 @@ vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -610,7 +610,7 @@ vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -621,7 +621,7 @@ vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -633,7 +633,7 @@ vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -644,7 +644,7 @@ vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -656,7 +656,7 @@ vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -667,7 +667,7 @@ vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -679,7 +679,7 @@ vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -690,7 +690,7 @@ vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -702,7 +702,7 @@ vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -713,7 +713,7 @@ vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -725,7 +725,7 @@ vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -736,7 +736,7 @@ vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -748,7 +748,7 @@ vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -759,7 +759,7 @@ vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -771,7 +771,7 @@ vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -782,7 +782,7 @@ vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -794,7 +794,7 @@ vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -806,7 +806,7 @@ vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -818,7 +818,7 @@ vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -829,7 +829,7 @@ vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -841,7 +841,7 @@ vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -852,7 +852,7 @@ vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -864,7 +864,7 @@ vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -875,7 +875,7 @@ vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -887,7 +887,7 @@ vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -898,7 +898,7 @@ vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -910,7 +910,7 @@ vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -921,7 +921,7 @@ vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -933,7 +933,7 @@ vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -944,7 +944,7 @@ vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -956,7 +956,7 @@ vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -967,7 +967,7 @@ vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -979,7 +979,7 @@ vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -990,7 +990,7 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -1002,7 +1002,7 @@ vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -1014,7 +1014,7 @@ vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -1026,7 +1026,7 @@ vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -1037,7 +1037,7 @@ vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -1049,7 +1049,7 @@ vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -1060,7 +1060,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -1072,7 +1072,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -1083,7 +1083,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -1095,7 +1095,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -1106,7 +1106,7 @@ vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -1118,7 +1118,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -1129,7 +1129,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -1141,7 +1141,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -1152,7 +1152,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -1164,7 +1164,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -1175,7 +1175,7 @@ vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -1187,7 +1187,7 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
index e25169c..d14f8b7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
@@ -107,7 +107,7 @@ vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
 //
 //
CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, @@ -120,7 +120,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, @@ -133,7 +133,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, @@ -146,7 +146,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -158,7 +158,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -170,7 +170,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, @@ -183,7 +183,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, @@ -196,7 +196,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, @@ -209,7 +209,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c index 1768558..ff45b8a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c @@ -107,7 +107,7 @@ vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, @@ -120,7 +120,7 @@ vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -132,7 +132,7 @@ 
vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -144,7 +144,7 @@ vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -156,7 +156,7 @@ vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -168,7 +168,7 @@ vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -180,7 +180,7 @@ vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -192,7 +192,7 @@ vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -204,7 +204,7 @@ vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c index 1d54c45..e7c49f3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c @@ -98,7 +98,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -109,7 +109,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -120,7 +120,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -131,7 +131,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -142,7 
+142,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -153,7 +153,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -164,7 +164,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -175,7 +175,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -186,7 +186,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c index 9088855..43a2489 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c @@ -197,7 +197,7 @@ vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -209,7 +209,7 @@ vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -220,7 +220,7 @@ vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -232,7 +232,7 @@ vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -243,7 +243,7 @@ vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -255,7 +255,7 @@ vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -266,7 +266,7 @@ vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -278,7 +278,7 @@ vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -289,7 +289,7 @@ vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -301,7 +301,7 @@ vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -312,7 +312,7 @@ vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -324,7 +324,7 @@ vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -335,7 +335,7 @@ vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, 
vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -347,7 +347,7 @@ vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -358,7 +358,7 @@ vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -370,7 +370,7 @@ vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -381,7 +381,7 @@ vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -393,7 +393,7 @@ vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t 
test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c index 9dd2f87..26f6b81 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c @@ -176,7 +176,7 @@ vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -188,7 +188,7 @@ vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -199,7 +199,7 @@ vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -211,7 +211,7 @@ vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -222,7 +222,7 @@ vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -234,7 +234,7 @@ vfloat64m2_t 
test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -245,7 +245,7 @@ vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -257,7 +257,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -268,7 +268,7 @@ vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -280,7 +280,7 @@ vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -291,7 +291,7 @@ vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -303,7 +303,7 @@ vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -314,7 +314,7 @@ vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -326,7 +326,7 @@ vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -337,7 +337,7 @@ vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -349,7 +349,7 @@ vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c index 64ecbb9..bea98de 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c 
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
@@ -388,7 +388,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask,
@@ -400,7 +400,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -411,7 +411,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -422,7 +422,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -433,7 +433,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -444,7 +444,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask,
@@ -456,7 +456,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -467,7 +467,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -478,7 +478,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -489,7 +489,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -500,7 +500,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -511,7 +511,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -522,7 +522,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -533,7 +533,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -544,7 +544,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -555,7 +555,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -566,7 +566,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -577,7 +577,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -588,7 +588,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -599,7 +599,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask,
@@ -611,7 +611,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -622,7 +622,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask,
@@ -634,7 +634,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -645,7 +645,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask,
@@ -657,7 +657,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -668,7 +668,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -679,7 +679,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -690,7 +690,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -701,7 +701,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -712,7 +712,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -723,7 +723,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -734,7 +734,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -745,7 +745,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -756,7 +756,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -767,7 +767,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -778,7 +778,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -789,7 +789,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -800,7 +800,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c index 208193d..6126db8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c @@ -96,7 +96,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1, // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -108,7 +108,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, // // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -119,7 +119,7 @@ vfloat64m1_t 
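The vfwcvt hunks above change only the expected IR, dropping the trailing attribute-group reference (e.g. "#[[ATTR6]]"); the C-level operand order for the masked form, mask first, then maskedoff, then source, then vl, is what the reordered builtins now match. A minimal usage sketch under that assumption, using the overloaded spelling with the type suffix dropped (the helper name widen_f32_to_i64 is hypothetical, not from this patch):

#include <riscv_vector.h>

// Hypothetical helper: masked widening conversion, f32 -> i64.
vint64m2_t widen_f32_to_i64(vbool32_t mask, vint64m2_t maskedoff,
                            vfloat32m1_t src, size_t vl) {
  return vfwcvt_x(mask, maskedoff, src, vl); // inactive lanes take maskedoff
}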
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
index 208193d..6126db8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
@@ -96,7 +96,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
 //
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -119,7 +119,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -142,7 +142,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -154,7 +154,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -165,7 +165,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -177,7 +177,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
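For the widening FMA tests the masked C form is (mask, acc, op1, op2, vl), with acc doubling as the merge value for inactive lanes. A sketch under those assumptions (helper name and element widths chosen for illustration only):

// Hypothetical: masked widening fused multiply-add, f32 x f32 -> f64.
vfloat64m1_t wfma(vbool64_t mask, vfloat64m1_t acc,
                  vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfwmacc(mask, acc, op1, op2, vl); // acc + op1 * op2, widened
}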
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
index b1bf8f5..d2e35dc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
@@ -96,7 +96,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
 //
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -119,7 +119,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -142,7 +142,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -154,7 +154,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -165,7 +165,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -177,7 +177,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
index b5ef0b1..eb1ba18 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
@@ -92,7 +92,7 @@ vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -104,7 +104,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -115,7 +115,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -127,7 +127,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -138,7 +138,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -161,7 +161,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -173,7 +173,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
index 7264c4c..f8a260a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
@@ -96,7 +96,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
 //
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -119,7 +119,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -142,7 +142,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -154,7 +154,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -165,7 +165,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -177,7 +177,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
index a006991..a5e5cc7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
@@ -96,7 +96,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
 //
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -119,7 +119,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -131,7 +131,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -142,7 +142,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -154,7 +154,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -165,7 +165,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -177,7 +177,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
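The vector-scalar (vf) variants in these FMA files take a plain float for op1, as the signatures above show; the masked operand order is otherwise identical. A sketch, again assuming the overloaded spelling used by these tests (the helper name is hypothetical):

// Hypothetical: masked widening negated multiply-subtract, scalar op1.
vfloat64m2_t wnmsac_s(vbool32_t mask, vfloat64m2_t acc, float op1,
                      vfloat32m1_t op2, size_t vl) {
  return vfwnmsac(mask, acc, op1, op2, vl); // -(op1 * op2) + acc per lane
}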
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
index e70ef4e..be390c6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -68,7 +68,7 @@ vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -80,7 +80,7 @@ vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
@@ -92,7 +92,7 @@ vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
@@ -104,7 +104,7 @@ vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
@@ -116,7 +116,7 @@ vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
@@ -188,7 +188,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -200,7 +200,7 @@ vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
@@ -212,7 +212,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
@@ -224,7 +224,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
@@ -236,7 +236,7 @@ vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
index 441d7a6..e20c244 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -176,7 +176,7 @@ vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -188,7 +188,7 @@ vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -199,7 +199,7 @@ vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -211,7 +211,7 @@ vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -222,7 +222,7 @@ vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -234,7 +234,7 @@ vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -245,7 +245,7 @@ vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -257,7 +257,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -268,7 +268,7 @@ vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -280,7 +280,7 @@ vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -291,7 +291,7 @@ vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -303,7 +303,7 @@ vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -314,7 +314,7 @@ vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -326,7 +326,7 @@ vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -337,7 +337,7 @@ vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -349,7 +349,7 @@ vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
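vfwsub is the one family in this stretch with .wv/.wf forms, where op1 is already the wide type; the masked C order stays (mask, maskedoff, op1, op2, vl). A sketch, assuming the overloaded _wv spelling carries over from the unmasked tests (helper name hypothetical):

// Hypothetical: masked wide-minus-narrow subtract, f64 - widen(f32).
vfloat64m1_t wsub_w(vbool64_t mask, vfloat64m1_t maskedoff,
                    vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfwsub_wv(mask, maskedoff, op1, op2, vl);
}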
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
index 50b34a7..49b319e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -7,7 +7,7 @@
 //
 // CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -18,7 +18,7 @@ vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -29,7 +29,7 @@ vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -40,7 +40,7 @@ vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -91,7 +91,7 @@ vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -102,7 +102,7 @@ vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -113,7 +113,7 @@ vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -124,7 +124,7 @@ vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -135,7 +135,7 @@ vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -146,7 +146,7 @@ vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -157,7 +157,7 @@ vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -168,7 +168,7 @@ vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -179,7 +179,7 @@ vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -190,7 +190,7 @@ vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -201,7 +201,7 @@ vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -212,7 +212,7 @@ vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -223,7 +223,7 @@ vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -234,7 +234,7 @@ vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
index fbd199e..d90f8c0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
@@ -7,7 +7,7 @@
 //
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -18,7 +18,7 @@ vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -29,7 +29,7 @@ vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -40,7 +40,7 @@ vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -51,7 +51,7 @@ vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t
maskedoff, @@ -62,7 +62,7 @@ vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -73,7 +73,7 @@ vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -84,7 +84,7 @@ vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -95,7 +95,7 @@ vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -106,7 +106,7 @@ vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -117,7 +117,7 @@ vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -128,7 +128,7 @@ vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u16m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -139,7 +139,7 @@ vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -150,7 +150,7 @@ vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -161,7 +161,7 @@ vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -172,7 +172,7 @@ vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -183,7 +183,7 @@ vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -194,7 +194,7 @@ vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -205,7 +205,7 @@ vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -216,7 +216,7 @@ vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -227,7 +227,7 @@ vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -238,7 +238,7 @@ vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_viota_m_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c index 7122973..1a069c2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { @@ -20,7 +20,7 @@ vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_ // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { @@ -31,7 +31,7 @@ vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_ // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { @@ -42,7 +42,7 @@ vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_ // CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { @@ -53,7 +53,7 @@ vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *b // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { @@ -64,7 +64,7 @@ vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *b // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { @@ -75,7 +75,7 @@ vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *b // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { @@ -86,7 +86,7 @@ vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *b // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { @@ -97,7 +97,7 @@ vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { @@ -108,7 +108,7 @@ vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { @@ -119,7 +119,7 @@ vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1 // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { @@ -130,7 +130,7 @@ vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16 // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { @@ -141,7 +141,7 @@ vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16 // 
CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { @@ -152,7 +152,7 @@ vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16 // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { @@ -163,7 +163,7 @@ vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { @@ -174,7 +174,7 @@ vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3 // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { @@ -185,7 +185,7 @@ vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3 // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { @@ -196,7 +196,7 @@ vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32 // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { @@ -207,7 +207,7 @@ vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32 // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { @@ -218,7 +218,7 @@ vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6 // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { @@ -229,7 +229,7 @@ vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6 // CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { @@ -240,7 +240,7 @@ vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6 // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { @@ -251,7 +251,7 @@ vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64 // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t 
mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { @@ -262,7 +262,7 @@ vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { @@ -273,7 +273,7 @@ vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { @@ -284,7 +284,7 @@ vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { @@ -295,7 +295,7 @@ vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { @@ -306,7 +306,7 @@ vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { @@ -317,7 +317,7 @@ vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { @@ -328,7 +328,7 @@ vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { @@ -339,7 +339,7 @@ vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { @@ -350,7 +350,7 @@ vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { @@ -361,7 +361,7 @@ vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { @@ -372,7 +372,7 @@ vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { @@ -383,7 +383,7 @@ vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { @@ -394,7 +394,7 @@ vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { @@ -405,7 +405,7 @@ vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { @@ -416,7 +416,7 @@ vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { @@ -427,7 +427,7 @@ vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, 
size_t vl) { @@ -438,7 +438,7 @@ vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { @@ -449,7 +449,7 @@ vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { @@ -460,7 +460,7 @@ vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { @@ -471,7 +471,7 @@ vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { @@ -482,7 +482,7 @@ vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { @@ -493,7 +493,7 @@ vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { @@ -504,7 +504,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { @@ -515,7 +515,7 @@ vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { @@ -526,7 +526,7 @@ vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { @@ -537,7 +537,7 @@ vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { @@ -548,7 +548,7 @@ vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { @@ -559,7 +559,7 @@ vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { @@ -570,7 +570,7 @@ vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { @@ -581,7 +581,7 @@ vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c index 167292d..e37ddb0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c @@ -2111,7 +2111,7 @@ vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_ // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -2122,7 +2122,7 @@ vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -2133,7 +2133,7 @@ vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -2144,7 +2144,7 @@ vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -2155,7 +2155,7 @@ vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -2166,7 +2166,7 @@ vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -2177,7 +2177,7 @@ vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -2188,7 +2188,7 @@ vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -2199,7 +2199,7 @@ vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -2210,7 +2210,7 @@ vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -2221,7 +2221,7 @@ vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -2232,7 +2232,7 
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -2243,7 +2243,7 @@ vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -2254,7 +2254,7 @@ vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -2265,7 +2265,7 @@ vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -2276,7 +2276,7 @@ vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -2287,7 +2287,7 @@ vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -2298,7 +2298,7 @@ vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -2309,7 +2309,7 @@ vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -2320,7 +2320,7 @@ vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -2331,7 +2331,7 @@ vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -2342,7 +2342,7 @@ vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -2353,7 +2353,7 @@ vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -2364,7 +2364,7 @@ vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -2375,7 +2375,7 @@ vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -2386,7 +2386,7 @@ vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -2397,7 +2397,7 @@ vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -2408,7 +2408,7 @@ vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int
 // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -2419,7 +2419,7 @@ vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -2430,7 +2430,7 @@ vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -2441,7 +2441,7 @@ vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -2452,7 +2452,7 @@ vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -2463,7 +2463,7 @@ vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -2474,7 +2474,7 @@ vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -2485,7 +2485,7 @@ vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -2496,7 +2496,7 @@ vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -2507,7 +2507,7 @@ vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -2518,7 +2518,7 @@ vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -2529,7 +2529,7 @@ vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -2540,7 +2540,7 @@ vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -2551,7 +2551,7 @@ vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -2562,7 +2562,7 @@ vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -2573,7 +2573,7 @@ vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -2584,7 +2584,7 @@ vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -2595,7 +2595,7 @@ vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -2606,7 +2606,7 @@ vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -2617,7 +2617,7 @@ vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -2628,7 +2628,7 @@ vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -2639,7 +2639,7 @@ vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -2650,7 +2650,7 @@ vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -2661,7 +2661,7 @@ vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -2672,7 +2672,7 @@ vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -2683,7 +2683,7 @@ vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -2694,7 +2694,7 @@ vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -2705,7 +2705,7 @@ vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -2716,7 +2716,7 @@ vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -2727,7 +2727,7 @@ vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -2738,7 +2738,7 @@ vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -2749,7 +2749,7 @@ vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -2760,7 +2760,7 @@ vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -2771,7 +2771,7 @@ vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -2782,7 +2782,7 @@ vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -2793,7 +2793,7 @@ vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -2804,7 +2804,7 @@ vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -2815,7 +2815,7 @@ vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -2826,7 +2826,7 @@ vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -2837,7 +2837,7 @@ vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -2848,7 +2848,7 @@ vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -2859,7 +2859,7 @@ vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -2870,7 +2870,7 @@ vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -2881,7 +2881,7 @@ vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -2892,7 +2892,7 @@ vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -2903,7 +2903,7 @@ vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -2914,7 +2914,7 @@ vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -2925,7 +2925,7 @@ vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -2936,7 +2936,7 @@ vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -2947,7 +2947,7 @@ vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -2958,7 +2958,7 @@ vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -2969,7 +2969,7 @@ vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -2980,7 +2980,7 @@ vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -2991,7 +2991,7 @@ vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -3002,7 +3002,7 @@ vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -3013,7 +3013,7 @@ vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -3024,7 +3024,7 @@ vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -3035,7 +3035,7 @@ vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -3046,7 +3046,7 @@ vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -3057,7 +3057,7 @@ vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -3068,7 +3068,7 @@ vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -3079,7 +3079,7 @@ vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -3090,7 +3090,7 @@ vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -3101,7 +3101,7 @@ vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uin
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -3112,7 +3112,7 @@ vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uin
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3123,7 +3123,7 @@ vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -3134,7 +3134,7 @@ vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -3145,7 +3145,7 @@ vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -3156,7 +3156,7 @@ vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -3167,7 +3167,7 @@ vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -3178,7 +3178,7 @@ vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, 
const // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -3189,7 +3189,7 @@ vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -3200,7 +3200,7 @@ vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -3211,7 +3211,7 @@ vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3222,7 +3222,7 @@ vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t 
maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3233,7 +3233,7 @@ vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3244,7 +3244,7 @@ vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -3255,7 +3255,7 @@ vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const u // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -3266,7 +3266,7 @@ vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const u // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { @@ -3277,7 +3277,7 @@ vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const u // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3288,7 +3288,7 @@ vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3299,7 +3299,7 @@ vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -3310,7 +3310,7 @@ vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -3321,7 +3321,7 @@ vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -3332,7 +3332,7 @@ vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -3343,7 +3343,7 @@ vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3354,7 +3354,7 @@ vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -3365,7 +3365,7 @@ vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -3376,7 +3376,7 @@ vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -3387,7 +3387,7 @@ vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, 
const // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -3398,7 +3398,7 @@ vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -3409,7 +3409,7 @@ vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -3420,7 +3420,7 @@ vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -3431,7 +3431,7 @@ vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t 
test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -3442,7 +3442,7 @@ vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3453,7 +3453,7 @@ vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3464,7 +3464,7 @@ vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3475,7 +3475,7 @@ vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -3486,7 +3486,7 @@ vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const u // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -3497,7 +3497,7 @@ vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const u // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3508,7 +3508,7 @@ vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3519,7 +3519,7 @@ vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -3530,7 +3530,7 @@ vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -3541,7 +3541,7 @@ vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -3552,7 +3552,7 @@ vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3563,7 +3563,7 @@ vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -3574,7 +3574,7 @@ vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -3585,7 +3585,7 @@ vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -3596,7 +3596,7 @@ vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, 
const // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -3607,7 +3607,7 @@ vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -3618,7 +3618,7 @@ vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -3629,7 +3629,7 @@ vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -3640,7 +3640,7 @@ vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t 
mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -3651,7 +3651,7 @@ vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3662,7 +3662,7 @@ vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3673,7 +3673,7 @@ vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3684,7 +3684,7 @@ vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -3695,7 +3695,7 @@ vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const u // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( 
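For readers working through these generated checks, the C-level pattern being exercised is the overloaded masked indexed load. A minimal usage sketch, not part of the patch itself: the wrapper name is hypothetical, while the types, the riscv_vector.h header, and the vloxei8 overload follow the adjacent tests.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical helper; mirrors the operand order the masked tests pass
// through to the builtin: mask, maskedoff, base, index vector, vl.
vuint64m8_t gather_u64m8(vbool8_t mask, vuint64m8_t maskedoff,
                         const uint64_t *base, vuint8m1_t bindex, size_t vl) {
  // Masked, index-ordered load: inactive lanes keep maskedoff's values.
  return vloxei8(mask, maskedoff, base, bindex, vl);
}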
@@ -3695,7 +3695,7 @@ vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const u
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -3706,7 +3706,7 @@ vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -3717,7 +3717,7 @@ vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -3728,7 +3728,7 @@ vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -3739,7 +3739,7 @@ vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -3750,7 +3750,7 @@ vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -3761,7 +3761,7 @@ vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -3772,7 +3772,7 @@ vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -3783,7 +3783,7 @@ vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -3794,7 +3794,7 @@ vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -3805,7 +3805,7 @@ vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -3816,7 +3816,7 @@ vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -3827,7 +3827,7 @@ vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
@@ -3838,7 +3838,7 @@ vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, c
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
@@ -3849,7 +3849,7 @@ vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
@@ -3860,7 +3860,7 @@ vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
@@ -3871,7 +3871,7 @@ vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
@@ -3882,7 +3882,7 @@ vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
@@ -3893,7 +3893,7 @@ vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
@@ -3904,7 +3904,7 @@ vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
@@ -3915,7 +3915,7 @@ vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
@@ -3926,7 +3926,7 @@ vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
@@ -3937,7 +3937,7 @@ vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
@@ -3948,7 +3948,7 @@ vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
@@ -3959,7 +3959,7 @@ vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
@@ -3970,7 +3970,7 @@ vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
@@ -3981,7 +3981,7 @@ vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
@@ -3992,7 +3992,7 @@ vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
@@ -4003,7 +4003,7 @@ vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
@@ -4014,7 +4014,7 @@ vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
@@ -4025,7 +4025,7 @@ vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
@@ -4036,7 +4036,7 @@ vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
@@ -4047,7 +4047,7 @@ vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
@@ -4058,7 +4058,7 @@ vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
@@ -4069,7 +4069,7 @@ vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, cons
// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
@@ -4080,7 +4080,7 @@ vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
@@ -4091,7 +4091,7 @@ vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
@@ -4102,7 +4102,7 @@ vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
@@ -4113,7 +4113,7 @@ vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call
@llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { @@ -4124,7 +4124,7 @@ vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -4135,7 +4135,7 @@ vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { @@ -4146,7 +4146,7 @@ vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { @@ -4157,7 +4157,7 @@ vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { @@ -4168,7 +4168,7 @@ vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { @@ -4179,7 +4179,7 @@ vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { @@ -4190,7 +4190,7 @@ vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { @@ -4201,7 +4201,7 @@ vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c index afab365..18b04a2 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t 
test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -22,7 +22,7 @@ vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -35,7 +35,7 @@ vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -48,7 +48,7 @@ vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -61,7 +61,7 @@ vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -74,7 +74,7 @@ vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -87,7 +87,7 @@ vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -100,7 +100,7 @@ vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -113,7 +113,7 @@ vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -126,7 +126,7 @@ vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -139,7 +139,7 @@ vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -152,7 +152,7 @@ vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -165,7 +165,7 @@ vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -178,7 +178,7 @@ vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -191,7 +191,7 @@ vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -204,7 +204,7 @@ vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -217,7 +217,7 @@ vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -230,7 +230,7 @@ vint32m4_t 
test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -243,7 +243,7 @@ vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -256,7 +256,7 @@ vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -269,7 +269,7 @@ vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -282,7 +282,7 @@ vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -295,7 +295,7 @@ vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -308,7 +308,7 @@ vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -321,7 +321,7 @@ vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -334,7 +334,7 @@ vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -347,7 +347,7 @@ vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -360,7 +360,7 @@ vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -373,7 +373,7 @@ vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -386,7 +386,7 @@ vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -399,7 +399,7 @@ vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -412,7 +412,7 @@ vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -425,7 +425,7 @@ vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -438,7 +438,7 @@ vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t 
maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -451,7 +451,7 @@ vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -464,7 +464,7 @@ vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -477,7 +477,7 @@ vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -490,7 +490,7 @@ vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -503,7 +503,7 @@ vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -516,7 +516,7 @@ vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -529,7 +529,7 @@ vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -542,7 +542,7 @@ vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -555,7 +555,7 @@ vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -568,7 +568,7 @@ vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -581,7 +581,7 @@ vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -594,7 +594,7 @@ vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -607,7 +607,7 @@ vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -620,7 +620,7 @@ vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -633,7 +633,7 @@ vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t 
maskedoff,
@@ -646,7 +646,7 @@ vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -659,7 +659,7 @@ vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -672,7 +672,7 @@ vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -685,7 +685,7 @@ vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
 vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
index d002c10..5363c07 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
@@ -2110,7 +2110,7 @@ vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -2121,7 +2121,7 @@ vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -2132,7 +2132,7 @@ vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -2143,7 +2143,7 @@ vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -2154,7 +2154,7 @@ vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -2165,7 +2165,7 @@ vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -2176,7 +2176,7 @@ vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -2187,7 +2187,7 @@ vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -2198,7 +2198,7 @@ vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -2209,7 +2209,7 @@ vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -2220,7 +2220,7 @@ vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -2231,7 +2231,7 @@ vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -2242,7 +2242,7 @@ vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -2253,7 +2253,7 @@ vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -2264,7 +2264,7 @@ vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -2275,7 +2275,7 @@ vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -2286,7 +2286,7 @@ vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -2297,7 +2297,7 @@ vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -2308,7 +2308,7 @@ vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -2319,7 +2319,7 @@ vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const i
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex,
size_t vl) { @@ -2330,7 +2330,7 @@ vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -2341,7 +2341,7 @@ vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -2352,7 +2352,7 @@ vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_ // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -2363,7 +2363,7 @@ vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -2374,7 +2374,7 @@ vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -2385,7 +2385,7 @@ vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -2396,7 +2396,7 @@ vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -2407,7 +2407,7 @@ vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { @@ -2418,7 +2418,7 @@ vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -2429,7 +2429,7 @@ vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -2440,7 +2440,7 @@ vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -2451,7 +2451,7 @@ vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -2462,7 +2462,7 @@ vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -2473,7 +2473,7 @@ vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { @@ -2484,7 +2484,7 @@ vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -2495,7 +2495,7 @@ vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -2506,7 +2506,7 @@ vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -2517,7 +2517,7 @@ vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -2528,7 +2528,7 @@ vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -2539,7 +2539,7 @@ vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, 
vint16m4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -2550,7 +2550,7 @@ vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -2561,7 +2561,7 @@ vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -2572,7 +2572,7 @@ vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -2583,7 +2583,7 @@ vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t 
test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -2594,7 +2594,7 @@ vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -2605,7 +2605,7 @@ vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -2616,7 +2616,7 @@ vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -2627,7 +2627,7 @@ vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { @@ -2638,7 +2638,7 @@ vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -2649,7 +2649,7 @@ vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -2660,7 +2660,7 @@ vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -2671,7 +2671,7 @@ vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -2682,7 +2682,7 @@ vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { @@ -2693,7 +2693,7 @@ vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -2704,7 +2704,7 @@ vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -2715,7 +2715,7 @@ vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -2726,7 +2726,7 @@ vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -2737,7 +2737,7 @@ vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { @@ -2748,7 +2748,7 @@ vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const in // 
CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -2759,7 +2759,7 @@ vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -2770,7 +2770,7 @@ vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -2781,7 +2781,7 @@ vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -2792,7 +2792,7 @@ vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, 
const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -2803,7 +2803,7 @@ vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -2814,7 +2814,7 @@ vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -2825,7 +2825,7 @@ vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { @@ -2836,7 +2836,7 @@ vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -2847,7 +2847,7 @@ vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -2858,7 +2858,7 @@ vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -2869,7 +2869,7 @@ vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { @@ -2880,7 +2880,7 @@ vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -2891,7 +2891,7 @@ vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -2902,7 +2902,7 @@ vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -2913,7 +2913,7 @@ vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { @@ -2924,7 +2924,7 @@ vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -2935,7 +2935,7 @@ vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -2946,7 +2946,7 @@ vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -2957,7 +2957,7 @@ vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { @@ -2968,7 +2968,7 @@ vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -2979,7 +2979,7 @@ vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -2990,7 +2990,7 @@ vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3001,7 +3001,7 @@ vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -3012,7 +3012,7 @@ vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t 
mask, vuint8m1_t maskedoff, const uint // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -3023,7 +3023,7 @@ vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -3034,7 +3034,7 @@ vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { @@ -3045,7 +3045,7 @@ vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3056,7 +3056,7 @@ vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, 
vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3067,7 +3067,7 @@ vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -3078,7 +3078,7 @@ vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -3089,7 +3089,7 @@ vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -3100,7 +3100,7 @@ vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -3111,7 +3111,7 @@ vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3122,7 +3122,7 @@ vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -3133,7 +3133,7 @@ vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -3144,7 +3144,7 @@ vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -3155,7 +3155,7 @@ vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -3166,7 +3166,7 @@ vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -3177,7 +3177,7 @@ vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -3188,7 +3188,7 @@ vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -3199,7 +3199,7 @@ vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -3210,7 +3210,7 @@ vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3221,7 +3221,7 @@ vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, con // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3232,7 +3232,7 @@ vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, con // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3243,7 +3243,7 @@ vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -3254,7 +3254,7 @@ vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const u // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -3265,7 +3265,7 @@ vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const u // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { @@ -3276,7 +3276,7 
@@ vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const u // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3287,7 +3287,7 @@ vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3298,7 +3298,7 @@ vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -3309,7 +3309,7 @@ vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -3320,7 +3320,7 @@ vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -3331,7 +3331,7 @@ vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -3342,7 +3342,7 @@ vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3353,7 +3353,7 @@ vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -3364,7 +3364,7 @@ vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -3375,7 +3375,7 @@ vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -3386,7 +3386,7 @@ vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -3397,7 +3397,7 @@ vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -3408,7 +3408,7 @@ vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -3419,7 +3419,7 @@ vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -3430,7 +3430,7 @@ vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -3441,7 +3441,7 @@ vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3452,7 +3452,7 @@ vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, con // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3463,7 +3463,7 @@ vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3474,7 +3474,7 @@ vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -3485,7 +3485,7 @@ 
vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const u // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -3496,7 +3496,7 @@ vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const u // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3507,7 +3507,7 @@ vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3518,7 +3518,7 @@ vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -3529,7 +3529,7 @@ vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -3540,7 +3540,7 @@ vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -3551,7 +3551,7 @@ vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3562,7 +3562,7 @@ vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -3573,7 +3573,7 @@ vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -3584,7 +3584,7 @@ vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -3595,7 +3595,7 @@ vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -3606,7 +3606,7 @@ vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -3617,7 +3617,7 @@ vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -3628,7 +3628,7 @@ vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -3639,7 +3639,7 @@ vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -3650,7 +3650,7 @@ vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3661,7 +3661,7 @@ vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3672,7 +3672,7 @@ vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3683,7 +3683,7 @@ vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -3694,7 +3694,7 @@ vuint64m8_t 
test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const u // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3705,7 +3705,7 @@ vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3716,7 +3716,7 @@ vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -3727,7 +3727,7 @@ vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -3738,7 +3738,7 @@ vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3749,7 +3749,7 @@ vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -3760,7 +3760,7 @@ vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -3771,7 +3771,7 @@ vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -3782,7 +3782,7 @@ vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -3793,7 +3793,7 @@ vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -3804,7 +3804,7 @@ vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -3815,7 +3815,7 @@ vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { @@ -3826,7 +3826,7 @@ vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -3837,7 +3837,7 @@ vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, c // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -3848,7 +3848,7 @@ vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -3859,7 +3859,7 @@ vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { @@ -3870,7 +3870,7 @@ vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { @@ -3881,7 +3881,7 @@ vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -3892,7 +3892,7 @@ vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -3903,7 +3903,7 @@ vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, 
vfloat32m1_t maskedoff, con // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { @@ -3914,7 +3914,7 @@ vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { @@ -3925,7 +3925,7 @@ vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { @@ -3936,7 +3936,7 @@ vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, cons // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -3947,7 +3947,7 @@ vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t 
test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
@@ -3958,7 +3958,7 @@ vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
@@ -3969,7 +3969,7 @@ vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
@@ -3980,7 +3980,7 @@ vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
@@ -3991,7 +3991,7 @@ vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
@@ -4002,7 +4002,7 @@ vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
@@ -4013,7 +4013,7 @@ vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
@@ -4024,7 +4024,7 @@ vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
@@ -4035,7 +4035,7 @@ vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
@@ -4046,7 +4046,7 @@ vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
@@ -4057,7 +4057,7 @@ vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
@@ -4068,7 +4068,7 @@ vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
@@ -4079,7 +4079,7 @@ vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
@@ -4090,7 +4090,7 @@ vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
@@ -4101,7 +4101,7 @@ vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
@@ -4112,7 +4112,7 @@ vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
@@ -4123,7 +4123,7 @@ vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
@@ -4134,7 +4134,7 @@ vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
@@ -4145,7 +4145,7 @@ vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
@@ -4156,7 +4156,7 @@ vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
@@ -4167,7 +4167,7 @@ vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, cons
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
@@ -4178,7 +4178,7 @@ vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
@@ -4189,7 +4189,7 @@ vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
@@ -4200,7 +4200,7 @@ vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, con
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
index 66adf5b1..e902a63 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
@@ -887,7 +887,7 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -937,7 +937,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -947,7 +947,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -957,7 +957,7 @@ vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -967,7 +967,7 @@ vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -997,7 +997,7 @@ vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -1007,7 +1007,7 @@ vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -1017,7 +1017,7 @@ vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -1027,7 +1027,7 @@ vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1037,7 +1037,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -1047,7 +1047,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1057,7 +1057,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -1087,7 +1087,7 @@ vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -1097,7 +1097,7 @@ vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -1107,7 +1107,7 @@ vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1127,7 +1127,7 @@ vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1137,7 +1137,7 @@ vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1147,7 +1147,7 @@ vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1157,7 +1157,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1177,7 +1177,7 @@ vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1187,7 +1187,7 @@ vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1197,7 +1197,7 @@ vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1217,7 +1217,7 @@ vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1227,7 +1227,7 @@ vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1237,7 +1237,7 @@ vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1247,7 +1247,7 @@ vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1267,7 +1267,7 @@ vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1277,7 +1277,7 @@ vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1287,7 +1287,7 @@ vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1297,7 +1297,7 @@ vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1307,7 +1307,7 @@ vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1317,7 +1317,7 @@ vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1327,7 +1327,7 @@ vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1337,7 +1337,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1357,7 +1357,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1377,7 +1377,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1397,7 +1397,7 @@ vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1407,7 +1407,7 @@ vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1417,7 +1417,7 @@ vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1427,7 +1427,7 @@ vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1447,7 +1447,7 @@ vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1457,7 +1457,7 @@ vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1467,7 +1467,7 @@ vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1477,7 +1477,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1487,7 +1487,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1497,7 +1497,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1507,7 +1507,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1517,7 +1517,7 @@ vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1537,7 +1537,7 @@ vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1547,7 +1547,7 @@ vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1557,7 +1557,7 @@ vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1567,7 +1567,7 @@ vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1577,7 +1577,7 @@ vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1587,7 +1587,7 @@ vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1597,7 +1597,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1607,7 +1607,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1627,7 +1627,7 @@ vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1637,7 +1637,7 @@ vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1647,7 +1647,7 @@ vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1657,7 +1657,7 @@ vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1667,7 +1667,7 @@ vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1677,7 +1677,7 @@ vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1687,7 +1687,7 @@ vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1697,7 +1697,7 @@ vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1717,7 +1717,7 @@ vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1727,7 +1727,7 @@ vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1737,7 +1737,7 @@ vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
 //
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
index e1f4812..4bd51db 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
@@ -887,7 +887,7 @@ vuint64m8_t
test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -897,7 +897,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -907,7 +907,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -917,7 +917,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -927,7 +927,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -937,7 +937,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -947,7 +947,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -957,7 +957,7 @@ vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -967,7 +967,7 @@ vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1 // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2 // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -997,7 +997,7 @@ vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -1007,7 +1007,7 @@ vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4 // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1017,7 +1017,7 @@ vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -1027,7 +1027,7 @@ vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8 // // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1037,7 +1037,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t // // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -1047,7 +1047,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1057,7 +1057,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t // // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -1087,7 +1087,7 @@ vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1097,7 +1097,7 @@ vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -1107,7 +1107,7 @@ vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1127,7 +1127,7 @@ vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1137,7 +1137,7 @@ vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1147,7 +1147,7 @@ vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1157,7 +1157,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t // // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1177,7 +1177,7 @@ vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1187,7 +1187,7 @@ vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1197,7 +1197,7 @@ vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1217,7 +1217,7 @@ vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1227,7 +1227,7 @@ vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1237,7 +1237,7 @@ vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1247,7 +1247,7 @@ vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1307,7 +1307,7 @@ vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1377,7 +1377,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t 
mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1387,7 +1387,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1397,7 +1397,7 @@ vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1407,7 +1407,7 @@ vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1417,7 +1417,7 @@ vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1427,7 +1427,7 @@ vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1447,7 +1447,7 @@ vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1457,7 +1457,7 @@ vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1467,7 +1467,7 @@ vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin // // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1477,7 +1477,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4 // // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1487,7 +1487,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o // // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, 
size_t vl) { @@ -1497,7 +1497,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2 // // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1517,7 +1517,7 @@ vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1537,7 +1537,7 @@ vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1547,7 +1547,7 @@ vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1557,7 +1557,7 @@ vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1567,7 +1567,7 @@ vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1577,7 +1577,7 @@ vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1587,7 +1587,7 @@ vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1597,7 +1597,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2 // // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, 
vuint32mf2_t op2, size_t vl) { @@ -1607,7 +1607,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1677,7 +1677,7 @@ vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, 
 vuint64m2_t op2, size_t vl) {
@@ -1717,7 +1717,7 @@ vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
 //
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1727,7 +1727,7 @@ vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1737,7 +1737,7 @@ vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
 //
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
 //
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
index 83f3d6b..06a6f51 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -887,7 +887,7 @@ vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -937,7 +937,7 @@ vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -947,7 +947,7 @@ vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -957,7 +957,7 @@ vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -967,7 +967,7 @@ vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -997,7 +997,7 @@ vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -1007,7 +1007,7 @@ vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -1017,7 +1017,7 @@ vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -1027,7 +1027,7 @@ vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1037,7 +1037,7 @@ vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -1047,7 +1047,7 @@ vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1057,7 +1057,7 @@ vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -1087,7 +1087,7 @@ vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -1097,7 +1097,7 @@ vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
@@ -1107,7 +1107,7 @@ vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1127,7 +1127,7 @@ vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1137,7 +1137,7 @@ vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1147,7 +1147,7 @@ vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1157,7 +1157,7 @@ vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1177,7 +1177,7 @@ vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1187,7 +1187,7 @@ vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1197,7 +1197,7 @@ vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1217,7 +1217,7 @@ vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1227,7 +1227,7 @@ vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1237,7 +1237,7 @@ vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1247,7 +1247,7 @@ vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
@@ -1267,7 +1267,7 @@ vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1277,7 +1277,7 @@ vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
@@ -1287,7 +1287,7 @@ vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1297,7 +1297,7 @@ vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
@@ -1307,7 +1307,7 @@ vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1317,7 +1317,7 @@ vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
@@ -1327,7 +1327,7 @@ vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1337,7 +1337,7 @@ vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1357,7 +1357,7 @@ vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1377,7 +1377,7 @@ vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1397,7 +1397,7 @@ vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1407,7 +1407,7 @@ vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1417,7 +1417,7 @@ vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1427,7 +1427,7 @@ vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1447,7 +1447,7 @@ vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1457,7 +1457,7 @@ vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1467,7 +1467,7 @@ vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1477,7 +1477,7 @@ vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1487,7 +1487,7 @@ vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1497,7 +1497,7 @@ vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1507,7 +1507,7 @@ vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1517,7 +1517,7 @@ vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1537,7 +1537,7 @@ vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1547,7 +1547,7 @@ vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1557,7 +1557,7 @@ vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1567,7 +1567,7 @@ vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1577,7 +1577,7 @@ vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1587,7 +1587,7 @@ vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1597,7 +1597,7 @@ vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1607,7 +1607,7 @@ vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1627,7 +1627,7 @@ vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1637,7 +1637,7 @@ vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1647,7 +1647,7 @@ vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1657,7 +1657,7 @@ vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1667,7 +1667,7 @@ vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1677,7 +1677,7 @@ vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1687,7 +1687,7 @@ vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1697,7 +1697,7 @@ vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1717,7 +1717,7 @@ vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -1727,7 +1727,7 @@ vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1737,7 +1737,7 @@ vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
index 6e336d4..ecbe041 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
@@ -8,7 +8,7 @@
 //
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
@@ -19,7 +19,7 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
@@ -30,7 +30,7 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
@@ -41,7 +41,7 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
@@ -52,7 +52,7 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
@@ -63,7 +63,7 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
@@ -74,7 +74,7 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
@@ -85,7 +85,7 @@ vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
@@ -96,7 +96,7 @@ vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
@@ -107,7 +107,7 @@ vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
 //
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
@@ -118,7 +118,7 @@ vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
 //
 //
CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, @@ -129,7 +129,7 @@ vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, @@ -140,7 +140,7 @@ vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, @@ -151,7 +151,7 @@ vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, @@ -162,7 +162,7 @@ vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, @@ -173,7 +173,7 @@ vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, @@ -184,7 +184,7 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, @@ -195,7 +195,7 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, @@ -206,7 +206,7 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, @@ -217,7 +217,7 @@ vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, @@ -228,7 +228,7 @@ vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, @@ -239,7 +239,7 @@ vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, @@ -250,7 +250,7 @@ vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, @@ -261,7 +261,7 @@ vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, @@ -272,7 +272,7 @@ vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, @@ -283,7 +283,7 @@ vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, @@ -294,7 +294,7 @@ vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, @@ -305,7 +305,7 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, @@ -316,7 +316,7 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, @@ -327,7 +327,7 @@ vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, @@ -338,7 +338,7 @@ vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, @@ -349,7 +349,7 @@ vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, @@ -360,7 +360,7 @@ vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, @@ -371,7 +371,7 @@ vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, @@ -382,7 +382,7 @@ vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, @@ -393,7 
+393,7 @@ vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, @@ -404,7 +404,7 @@ vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, @@ -415,7 +415,7 @@ vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, @@ -426,7 +426,7 @@ vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, @@ -437,7 +437,7 @@ vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, @@ -448,7 +448,7 @@ vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, @@ -459,7 +459,7 @@ vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, // // 
CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, @@ -470,7 +470,7 @@ vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, @@ -481,7 +481,7 @@ vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, @@ -492,7 +492,7 @@ vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, @@ -503,7 +503,7 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, @@ -514,7 +514,7 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, @@ -525,7 +525,7 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, @@ -536,7 +536,7 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, @@ -547,7 +547,7 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, @@ -558,7 +558,7 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, @@ -569,7 +569,7 @@ vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, @@ -580,7 +580,7 @@ vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, @@ -591,7 +591,7 @@ vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, @@ -602,7 +602,7 @@ vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, @@ -613,7 +613,7 @@ vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, @@ -624,7 +624,7 @@ vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, @@ -635,7 +635,7 @@ vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, @@ -646,7 +646,7 @@ vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, @@ -657,7 +657,7 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, @@ -668,7 +668,7 @@ vuint16mf4_t 
test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, @@ -679,7 +679,7 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, @@ -690,7 +690,7 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, @@ -701,7 +701,7 @@ vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, @@ -712,7 +712,7 @@ vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, @@ -723,7 +723,7 @@ vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, @@ -734,7 +734,7 @@ vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, @@ -745,7 +745,7 @@ vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, @@ -756,7 +756,7 @@ vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, @@ -767,7 +767,7 @@ vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, @@ -778,7 +778,7 @@ vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, @@ -789,7 +789,7 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, @@ -800,7 +800,7 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, @@ -811,7 +811,7 @@ vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, @@ -822,7 +822,7 @@ vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, @@ -833,7 +833,7 @@ vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, @@ -844,7 +844,7 @@ vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, @@ -855,7 +855,7 @@ vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, @@ -866,7 +866,7 @@ vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, @@ -877,7 +877,7 @@ vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, @@ -888,7 +888,7 @@ vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, @@ -899,7 +899,7 @@ vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, @@ -910,7 +910,7 @@ vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, @@ -921,7 +921,7 @@ vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, @@ -932,7 +932,7 @@ vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, @@ -943,7 +943,7 @@ vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, // // CHECK-RV64-LABEL: 
@test_vmerge_vxm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, @@ -954,7 +954,7 @@ vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, @@ -965,7 +965,7 @@ vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, @@ -976,7 +976,7 @@ vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, @@ -987,7 +987,7 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, @@ -998,7 +998,7 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, @@ -1009,7 +1009,7 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, @@ -1020,7 +1020,7 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, @@ -1031,7 +1031,7 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, @@ -1042,7 +1042,7 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, @@ -1053,7 +1053,7 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, @@ -1064,7 +1064,7 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, // // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c index 3415ba7..d7f5b59 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c @@ -194,7 +194,7 @@ vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -206,7 +206,7 @@ vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -217,7 +217,7 @@ vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -229,7 +229,7 @@ vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -240,7 +240,7 @@ vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -252,7 +252,7 @@ vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -263,7 +263,7 @@ vbool16_t 
test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -275,7 +275,7 @@ vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -286,7 +286,7 @@ vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -298,7 +298,7 @@ vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -309,7 +309,7 @@ vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -321,7 +321,7 @@ vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
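For reference, a minimal usage sketch (hypothetical, not part of the test file): the masked overloads take their arguments in the order of the C prototypes shown in these tests, (mask, maskedoff, op1, op2, vl), while the CHECK lines show the intrinsic's IR operand order, (maskedoff, op1, op2, mask, vl). Assumes a compiler with the V extension enabled and the <riscv_vector.h> header.

    #include <riscv_vector.h>

    // Hypothetical caller: masked vector-vector floating-point compare
    // through the overloaded name. Argument order follows the C prototype
    // order used by the tests: (mask, maskedoff, op1, op2, vl).
    vbool64_t cmp_eq_masked(vbool64_t mask, vbool64_t maskedoff,
                            vfloat32mf2_t op1, vfloat32mf2_t op2,
                            size_t vl) {
      return vmfeq(mask, maskedoff, op1, op2, vl);
    }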
vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -332,7 +332,7 @@ vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -344,7 +344,7 @@ vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -355,7 +355,7 @@ vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -367,7 +367,7 @@ vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -378,7 +378,7 @@ vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -390,7 +390,7 @@ vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c index cb0b456..e4624ab 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c @@ -188,7 +188,7 @@ vbool8_t test_vmfge_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -198,7 +198,7 @@ vbool64_t test_vmfge_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -208,7 +208,7 @@ vbool64_t test_vmfge_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vmfge_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -218,7 +218,7 @@ vbool32_t test_vmfge_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -228,7 +228,7 @@ vbool32_t test_vmfge_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -238,7 +238,7 @@ vbool16_t test_vmfge_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -248,7 +248,7 @@ vbool16_t test_vmfge_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -258,7 +258,7 @@ vbool8_t test_vmfge_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // // CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -268,7 +268,7 @@ vbool8_t test_vmfge_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // // CHECK-RV64-LABEL: @test_vmfge_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfge_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -278,7 +278,7 @@ vbool4_t test_vmfge_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // // CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmfge_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -288,7 +288,7 @@ vbool4_t test_vmfge_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // // CHECK-RV64-LABEL: @test_vmfge_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -298,7 +298,7 @@ vbool64_t test_vmfge_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfge_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -308,7 +308,7 @@ vbool64_t test_vmfge_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -318,7 +318,7 @@ vbool32_t test_vmfge_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfge_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -328,7 +328,7 @@ vbool32_t test_vmfge_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -338,7 +338,7 @@ vbool16_t test_vmfge_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat 
// // CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfge_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -348,7 +348,7 @@ vbool16_t test_vmfge_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfge_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -358,7 +358,7 @@ vbool8_t test_vmfge_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8 // // CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfge_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c index 889df32..2cf1abcc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c @@ -188,7 +188,7 @@ vbool8_t test_vmfgt_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -198,7 +198,7 @@ vbool64_t test_vmfgt_vv_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -208,7 
+208,7 @@ vbool64_t test_vmfgt_vf_f32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloa // // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -218,7 +218,7 @@ vbool32_t test_vmfgt_vv_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -228,7 +228,7 @@ vbool32_t test_vmfgt_vf_f32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -238,7 +238,7 @@ vbool16_t test_vmfgt_vv_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -248,7 +248,7 @@ vbool16_t test_vmfgt_vf_f32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -258,7 +258,7 @@ vbool8_t test_vmfgt_vv_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -268,7 +268,7 @@ vbool8_t test_vmfgt_vf_f32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat32m4 // // CHECK-RV64-LABEL: @test_vmfgt_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -278,7 +278,7 @@ vbool4_t test_vmfgt_vv_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -288,7 +288,7 @@ vbool4_t test_vmfgt_vf_f32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat32m8 // // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -298,7 +298,7 @@ vbool64_t test_vmfgt_vv_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -308,7 +308,7 @@ vbool64_t test_vmfgt_vf_f64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -318,7 +318,7 @@ vbool32_t test_vmfgt_vv_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -328,7 +328,7 @@ vbool32_t test_vmfgt_vf_f64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -338,7 +338,7 @@ vbool16_t test_vmfgt_vv_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -348,7 +348,7 @@ vbool16_t test_vmfgt_vf_f64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat // // CHECK-RV64-LABEL: @test_vmfgt_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -358,7 +358,7 @@ vbool8_t test_vmfgt_vv_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8 // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, 
double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c index 3cfeeaa..fe16113 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c @@ -194,7 +194,7 @@ vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -206,7 +206,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -217,7 +217,7 @@ vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -229,7 +229,7 @@ vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -240,7 +240,7 @@ vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -252,7 +252,7 @@ vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -263,7 +263,7 @@ vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -275,7 +275,7 @@ vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -286,7 +286,7 @@ vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -298,7 +298,7 @@ vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -309,7 +309,7 @@ vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -321,7 +321,7 @@ vbool64_t 
test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -332,7 +332,7 @@ vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -344,7 +344,7 @@ vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -355,7 +355,7 @@ vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -367,7 +367,7 @@ vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -378,7 +378,7 @@ vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -390,7 +390,7 @@ vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c index d1c5fd5..231433d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c @@ -194,7 +194,7 @@ vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -206,7 +206,7 @@ vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -217,7 +217,7 @@ vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -229,7 +229,7 @@ vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -240,7 +240,7 @@ vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, 
vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -252,7 +252,7 @@ vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -263,7 +263,7 @@ vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -275,7 +275,7 @@ vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -286,7 +286,7 @@ vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -298,7 +298,7 @@ vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -309,7 +309,7 @@ vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -321,7 +321,7 @@ vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -332,7 +332,7 @@ vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -344,7 +344,7 @@ vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -355,7 +355,7 @@ vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -367,7 +367,7 @@ vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -378,7 +378,7 @@ vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -390,7 +390,7 @@ vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c index 5426e25..7a14a8e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c @@ -194,7 +194,7 @@ vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -206,7 +206,7 @@ vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -217,7 +217,7 @@ vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, 
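The _vf tests in this file pass a scalar op2 (float or double) rather than a second vector; a companion sketch, under the same assumptions as above:

    #include <riscv_vector.h>

    // Hypothetical caller: masked vector-scalar compare. Overload
    // resolution on the scalar float op2 selects the _vf form, matching
    // the vmfne.mask.*.f32.* intrinsic named in the CHECK lines.
    vbool32_t cmp_ne_scalar(vbool32_t mask, vbool32_t maskedoff,
                            vfloat32m1_t op1, float op2, size_t vl) {
      return vmfne(mask, maskedoff, op1, op2, vl);
    }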
@@ -229,7 +229,7 @@ vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -240,7 +240,7 @@ vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -252,7 +252,7 @@ vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -263,7 +263,7 @@ vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -275,7 +275,7 @@ vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -286,7 +286,7 @@ vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -298,7 +298,7 @@ vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -309,7 +309,7 @@ vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -321,7 +321,7 @@ vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -332,7 +332,7 @@ vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -344,7 +344,7 @@ vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -355,7 +355,7 @@ vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -367,7 +367,7 @@ vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -378,7 +378,7 @@ vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -390,7 +390,7 @@ vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
index c8d26b4..7b78825 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -887,7 +887,7 @@ vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -937,7 +937,7 @@ vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -947,7 +947,7 @@ vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -957,7 +957,7 @@ vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -967,7 +967,7 @@ vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -997,7 +997,7 @@ vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -1007,7 +1007,7 @@ vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -1017,7 +1017,7 @@ vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -1027,7 +1027,7 @@ vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1037,7 +1037,7 @@ vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -1047,7 +1047,7 @@ vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1057,7 +1057,7 @@ vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -1087,7 +1087,7 @@ vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -1097,7 +1097,7 @@ vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
@@ -1107,7 +1107,7 @@ vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1127,7 +1127,7 @@ vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1137,7 +1137,7 @@ vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1147,7 +1147,7 @@ vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1157,7 +1157,7 @@ vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1177,7 +1177,7 @@ vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1187,7 +1187,7 @@ vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1197,7 +1197,7 @@ vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1217,7 +1217,7 @@ vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1227,7 +1227,7 @@ vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1237,7 +1237,7 @@ vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1247,7 +1247,7 @@ vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
@@ -1267,7 +1267,7 @@ vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1277,7 +1277,7 @@ vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
@@ -1287,7 +1287,7 @@ vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1297,7 +1297,7 @@ vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
@@ -1307,7 +1307,7 @@ vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1317,7 +1317,7 @@ vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
@@ -1327,7 +1327,7 @@ vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1337,7 +1337,7 @@ vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1357,7 +1357,7 @@ vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1377,7 +1377,7 @@ vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1397,7 +1397,7 @@ vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1407,7 +1407,7 @@ vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1417,7 +1417,7 @@ vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1427,7 +1427,7 @@ vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1447,7 +1447,7 @@ vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1457,7 +1457,7 @@ vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1467,7 +1467,7 @@ vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1477,7 +1477,7 @@ vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1487,7 +1487,7 @@ vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1497,7 +1497,7 @@ vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1507,7 +1507,7 @@ vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1517,7 +1517,7 @@ vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1537,7 +1537,7 @@ vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1547,7 +1547,7 @@ vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1557,7 +1557,7 @@ vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1567,7 +1567,7 @@ vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1577,7 +1577,7 @@ vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1587,7 +1587,7 @@ vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1597,7 +1597,7 @@ vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1607,7 +1607,7 @@ vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1627,7 +1627,7 @@ vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1637,7 +1637,7 @@ vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1647,7 +1647,7 @@ vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1657,7 +1657,7 @@ vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1667,7 +1667,7 @@ vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1677,7 +1677,7 @@ vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1687,7 +1687,7 @@ vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1697,7 +1697,7 @@ vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1717,7 +1717,7 @@ vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -1727,7 +1727,7 @@ vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1737,7 +1737,7 @@ vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
index be8576c8..551b0d8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
@@ -63,7 +63,7 @@ vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { return vmsbf(op1, vl); }
 //
 // CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@@ -74,7 +74,7 @@ vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@@ -85,7 +85,7 @@ vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@@ -96,7 +96,7 @@ vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@@ -107,7 +107,7 @@ vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@@ -118,7 +118,7 @@ vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i64( [[MASKEDOFF:%.*]],
[[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, @@ -129,7 +129,7 @@ vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, // // CHECK-RV64-LABEL: @test_vmsbf_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c index 8a3e495..f0b4841 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c @@ -893,7 +893,7 @@ vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -904,7 +904,7 @@ vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -915,7 +915,7 @@ vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -926,7 +926,7 @@ vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -937,7 +937,7 @@ vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t 
maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -948,7 +948,7 @@ vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -959,7 +959,7 @@ vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -970,7 +970,7 @@ vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -981,7 +981,7 @@ vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -992,7 +992,7 @@ vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1003,7 +1003,7 @@ vbool4_t 
test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1014,7 +1014,7 @@ vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1025,7 +1025,7 @@ vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1036,7 +1036,7 @@ vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1047,7 +1047,7 @@ vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1059,7 +1059,7 @@ vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t 
test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1070,7 +1070,7 @@ vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1082,7 +1082,7 @@ vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1093,7 +1093,7 @@ vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1104,7 +1104,7 @@ vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1115,7 +1115,7 @@ vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1126,7 +1126,7 @@ vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1137,7 +1137,7 @@ vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1148,7 +1148,7 @@ vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1159,7 +1159,7 @@ vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1170,7 +1170,7 @@ vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1181,7 +1181,7 @@ vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1193,7 +1193,7 @@ vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1204,7 +1204,7 @@ vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1215,7 +1215,7 @@ vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1226,7 +1226,7 @@ vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1237,7 +1237,7 @@ vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1248,7 +1248,7 @@ vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1259,7 +1259,7 @@ vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1270,7 +1270,7 @@ vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1281,7 +1281,7 @@ vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1292,7 +1292,7 @@ vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1303,7 +1303,7 @@ vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1314,7 +1314,7 @@ vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1325,7 +1325,7 @@ vbool32_t 
test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1336,7 +1336,7 @@ vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1347,7 +1347,7 @@ vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1358,7 +1358,7 @@ vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1369,7 +1369,7 @@ vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1380,7 +1380,7 @@ vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1392,7 +1392,7 @@ vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1403,7 +1403,7 @@ vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1415,7 +1415,7 @@ vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1426,7 +1426,7 @@ vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1438,7 +1438,7 @@ vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1449,7 +1449,7 @@ vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1460,7 +1460,7 @@ vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1471,7 +1471,7 @@ vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1482,7 +1482,7 @@ vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1493,7 +1493,7 @@ vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1504,7 +1504,7 @@ vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1515,7 +1515,7 @@ vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1526,7 +1526,7 @@ vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1537,7 +1537,7 @@ vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1549,7 +1549,7 @@ vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1561,7 +1561,7 @@ vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1573,7 +1573,7 @@ vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1585,7 +1585,7 @@ vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1597,7 +1597,7 @@ vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1608,7 +1608,7 @@ vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1619,7 +1619,7 @@ vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1630,7 +1630,7 @@ vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1641,7 +1641,7 @@ vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1652,7 +1652,7 @@ vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1663,7 +1663,7 @@ vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1674,7 +1674,7 @@ vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1686,7 +1686,7 @@ vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1698,7 +1698,7 @@ vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1710,7 +1710,7 @@ vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1721,7 +1721,7 @@ 
vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1733,7 +1733,7 @@ vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1744,7 +1744,7 @@ vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1755,7 +1755,7 @@ vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1766,7 +1766,7 @@ vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1777,7 +1777,7 @@ vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1788,7 +1788,7 @@ vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1800,7 +1800,7 @@ vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1811,7 +1811,7 @@ vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1823,7 +1823,7 @@ vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1834,7 +1834,7 @@ vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1846,7 +1846,7 @@ vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1857,7 +1857,7 @@ vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1868,7 +1868,7 @@ vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
index d01222e..4a990e8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsge.c
@@ -887,7 +887,7 @@ vbool8_t test_vmsgeu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf8_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsge_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vbool64_t test_vmsge_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf8_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsge_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vbool64_t test_vmsge_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf4_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsge_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vbool32_t test_vmsge_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf4_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsge_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vbool32_t test_vmsge_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8mf2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsge_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -937,7 +937,7 @@ vbool16_t test_vmsge_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8mf2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsge_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -947,7 +947,7 @@ vbool16_t test_vmsge_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m1_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsge_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -957,7 +957,7 @@ vbool8_t test_vmsge_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m1_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsge_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -967,7 +967,7 @@ vbool8_t test_vmsge_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m2_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsge_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vbool4_t test_vmsge_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m2_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsge_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vbool4_t test_vmsge_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m4_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsge_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -997,7 +997,7 @@ vbool2_t test_vmsge_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m4_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsge_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -1007,7 +1007,7 @@ vbool2_t test_vmsge_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i8m8_b1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool1_t test_vmsge_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -1017,7 +1017,7 @@ vbool1_t test_vmsge_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i8m8_b1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool1_t test_vmsge_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -1027,7 +1027,7 @@ vbool1_t test_vmsge_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsge_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1037,7 +1037,7 @@ vbool64_t test_vmsge_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsge_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -1047,7 +1047,7 @@ vbool64_t test_vmsge_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16mf2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsge_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1057,7 +1057,7 @@ vbool32_t test_vmsge_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16mf2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsge_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vbool32_t test_vmsge_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m1_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsge_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vbool16_t test_vmsge_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m1_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsge_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -1087,7 +1087,7 @@ vbool16_t test_vmsge_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m2_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsge_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -1097,7 +1097,7 @@ vbool8_t test_vmsge_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m2_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsge_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
@@ -1107,7 +1107,7 @@ vbool8_t test_vmsge_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m4_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsge_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vbool4_t test_vmsge_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m4_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsge_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1127,7 +1127,7 @@ vbool4_t test_vmsge_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i16m8_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsge_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1137,7 +1137,7 @@ vbool2_t test_vmsge_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i16m8_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsge_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1147,7 +1147,7 @@ vbool2_t test_vmsge_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsge_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1157,7 +1157,7 @@ vbool64_t test_vmsge_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsge_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vbool64_t test_vmsge_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m1_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsge_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1177,7 +1177,7 @@ vbool32_t test_vmsge_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m1_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsge_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1187,7 +1187,7 @@ vbool32_t test_vmsge_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsge_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1197,7 +1197,7 @@ vbool16_t test_vmsge_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsge_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vbool16_t test_vmsge_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m4_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsge_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1217,7 +1217,7 @@ vbool8_t test_vmsge_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m4_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsge_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1227,7 +1227,7 @@ vbool8_t test_vmsge_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i32m8_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsge_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1237,7 +1237,7 @@ vbool4_t test_vmsge_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i32m8_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsge_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1247,7 +1247,7 @@ vbool4_t test_vmsge_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m1_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsge_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vbool64_t test_vmsge_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m1_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsge_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
@@ -1267,7 +1267,7 @@ vbool64_t test_vmsge_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsge_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1277,7 +1277,7 @@ vbool32_t test_vmsge_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsge_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
@@ -1287,7 +1287,7 @@ vbool32_t test_vmsge_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m4_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsge_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1297,7 +1297,7 @@ vbool16_t test_vmsge_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m4_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsge_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
@@ -1307,7 +1307,7 @@ vbool16_t test_vmsge_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64
 //
 // CHECK-RV64-LABEL: @test_vmsge_vv_i64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsge_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1317,7 +1317,7 @@ vbool8_t test_vmsge_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vmsge_vx_i64m8_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsge.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsge_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
@@ -1327,7 +1327,7 @@ vbool8_t test_vmsge_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf8_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsgeu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1337,7 +1337,7 @@ vbool64_t test_vmsgeu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf8_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsgeu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vbool64_t test_vmsgeu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf4_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsgeu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1357,7 +1357,7 @@ vbool32_t test_vmsgeu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf4_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsgeu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1367,7 +1367,7 @@ vbool32_t test_vmsgeu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8mf2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsgeu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1377,7 +1377,7 @@ vbool16_t test_vmsgeu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8mf2_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsgeu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vbool16_t test_vmsgeu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m1_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsgeu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1397,7 +1397,7 @@ vbool8_t test_vmsgeu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m1_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsgeu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1407,7 +1407,7 @@ vbool8_t test_vmsgeu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m2_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsgeu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1417,7 +1417,7 @@ vbool4_t test_vmsgeu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m2_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsgeu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1427,7 +1427,7 @@ vbool4_t test_vmsgeu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m4_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsgeu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vbool2_t test_vmsgeu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m4_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsgeu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1447,7 +1447,7 @@ vbool2_t test_vmsgeu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u8m8_b1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool1_t test_vmsgeu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1457,7 +1457,7 @@ vbool1_t test_vmsgeu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u8m8_b1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool1_t test_vmsgeu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1467,7 +1467,7 @@ vbool1_t test_vmsgeu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsgeu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1477,7 +1477,7 @@ vbool64_t test_vmsgeu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf4_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsgeu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1487,7 +1487,7 @@ vbool64_t test_vmsgeu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16mf2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsgeu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1497,7 +1497,7 @@ vbool32_t test_vmsgeu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16mf2_b32_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool32_t test_vmsgeu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1507,7 +1507,7 @@ vbool32_t test_vmsgeu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m1_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsgeu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1517,7 +1517,7 @@ vbool16_t test_vmsgeu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m1_b16_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool16_t test_vmsgeu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vbool16_t test_vmsgeu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m2_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsgeu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1537,7 +1537,7 @@ vbool8_t test_vmsgeu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m2_b8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool8_t test_vmsgeu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1547,7 +1547,7 @@ vbool8_t test_vmsgeu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m4_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsgeu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1557,7 +1557,7 @@ vbool4_t test_vmsgeu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m4_b4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool4_t test_vmsgeu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1567,7 +1567,7 @@ vbool4_t test_vmsgeu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u16m8_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsgeu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1577,7 +1577,7 @@ vbool2_t test_vmsgeu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u16m8_b2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool2_t test_vmsgeu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1587,7 +1587,7 @@ vbool2_t test_vmsgeu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsgeu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1597,7 +1597,7 @@ vbool64_t test_vmsgeu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vbool64_t test_vmsgeu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1607,7 +1607,7 @@ vbool64_t test_vmsgeu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin
vbool64_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vbool32_t test_vmsgeu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vbool32_t test_vmsgeu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vbool16_t test_vmsgeu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vbool16_t test_vmsgeu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vbool8_t test_vmsgeu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vbool8_t test_vmsgeu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vmsgeu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1677,7 +1677,7 @@ vbool4_t test_vmsgeu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vmsgeu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgeu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vbool4_t test_vmsgeu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vbool64_t test_vmsgeu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgeu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vbool64_t test_vmsgeu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1717,7 +1717,7 @@ vbool32_t test_vmsgeu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgeu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1727,7 +1727,7 @@ vbool32_t test_vmsgeu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1737,7 +1737,7 @@ vbool16_t test_vmsgeu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgeu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1747,7 +1747,7 @@ vbool16_t test_vmsgeu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgeu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1757,7 +1757,7 @@ vbool8_t test_vmsgeu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8 // // CHECK-RV64-LABEL: @test_vmsgeu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgeu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgeu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, 
size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
index 6b95f0b..634db12 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
@@ -887,7 +887,7 @@ vbool8_t test_vmsgtu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
//
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgt_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vbool64_t test_vmsgt_vv_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgt_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vbool64_t test_vmsgt_vx_i8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vint8m
//
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgt_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vbool32_t test_vmsgt_vv_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgt_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vbool32_t test_vmsgt_vx_i8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vint8m
//
// CHECK-RV64-LABEL: @test_vmsgt_vv_i8mf2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgt_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t
maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -937,7 +937,7 @@ vbool16_t test_vmsgt_vv_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -947,7 +947,7 @@ vbool16_t test_vmsgt_vx_i8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint8m // // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -957,7 +957,7 @@ vbool8_t test_vmsgt_vv_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -967,7 +967,7 @@ vbool8_t test_vmsgt_vx_i8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vint8m1_t o // // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vbool4_t test_vmsgt_vv_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -987,7 +987,7 @@ vbool4_t test_vmsgt_vx_i8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vint8m2_t o // // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -997,7 +997,7 @@ vbool2_t test_vmsgt_vv_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1007,7 +1007,7 @@ vbool2_t test_vmsgt_vx_i8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vint8m4_t o // // CHECK-RV64-LABEL: @test_vmsgt_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgt_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1017,7 +1017,7 @@ vbool1_t test_vmsgt_vv_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgt_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1027,7 +1027,7 @@ vbool1_t test_vmsgt_vx_i8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vint8m8_t o // // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1037,7 +1037,7 @@ vbool64_t test_vmsgt_vv_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1047,7 +1047,7 @@ vbool64_t test_vmsgt_vx_i16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vint1 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1057,7 +1057,7 @@ vbool32_t test_vmsgt_vv_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vbool32_t test_vmsgt_vx_i16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint1 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vbool16_t test_vmsgt_vv_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1087,7 +1087,7 @@ vbool16_t test_vmsgt_vx_i16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vint16 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1097,7 +1097,7 @@ vbool8_t 
test_vmsgt_vv_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1107,7 +1107,7 @@ vbool8_t test_vmsgt_vx_i16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vbool4_t test_vmsgt_vv_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1127,7 +1127,7 @@ vbool4_t test_vmsgt_vx_i16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vmsgt_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1137,7 +1137,7 @@ vbool2_t test_vmsgt_vv_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1147,7 +1147,7 @@ vbool2_t test_vmsgt_vx_i16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1157,7 +1157,7 @@ vbool64_t test_vmsgt_vv_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vbool64_t test_vmsgt_vx_i32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vint3 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1177,7 +1177,7 @@ vbool32_t test_vmsgt_vv_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1187,7 +1187,7 @@ vbool32_t test_vmsgt_vx_i32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1197,7 +1197,7 @@ vbool16_t test_vmsgt_vv_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vbool16_t test_vmsgt_vx_i32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1217,7 +1217,7 @@ vbool8_t test_vmsgt_vv_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1227,7 +1227,7 @@ vbool8_t test_vmsgt_vx_i32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vmsgt_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1237,7 +1237,7 @@ vbool4_t test_vmsgt_vv_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1247,7 +1247,7 @@ vbool4_t test_vmsgt_vx_i32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vbool64_t 
test_vmsgt_vv_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vbool64_t test_vmsgt_vx_i64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vint64 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vbool32_t test_vmsgt_vv_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vbool32_t test_vmsgt_vx_i64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vint64 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vbool16_t test_vmsgt_vv_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64 // // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1307,7 +1307,7 @@ vbool16_t test_vmsgt_vx_i64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vint64 // // CHECK-RV64-LABEL: @test_vmsgt_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vbool8_t test_vmsgt_vv_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vbool8_t test_vmsgt_vx_i64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vbool64_t test_vmsgtu_vv_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vbool64_t test_vmsgtu_vx_u8mf8_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vbool32_t test_vmsgtu_vv_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vbool32_t test_vmsgtu_vx_u8mf4_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1377,7 +1377,7 @@ vbool16_t test_vmsgtu_vv_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1387,7 +1387,7 @@ vbool16_t test_vmsgtu_vx_u8mf2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1397,7 +1397,7 @@ vbool8_t test_vmsgtu_vv_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1407,7 +1407,7 @@ vbool8_t test_vmsgtu_vx_u8m1_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint8m1_t // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ 
-1417,7 +1417,7 @@ vbool4_t test_vmsgtu_vv_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1427,7 +1427,7 @@ vbool4_t test_vmsgtu_vx_u8m2_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint8m2_t // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vbool2_t test_vmsgtu_vv_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1447,7 +1447,7 @@ vbool2_t test_vmsgtu_vx_u8m4_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint8m4_t // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1457,7 +1457,7 @@ vbool1_t test_vmsgtu_vv_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1467,7 +1467,7 @@ vbool1_t test_vmsgtu_vx_u8m8_b1_m (vbool1_t mask, vbool1_t maskedoff, vuint8m8_t // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1477,7 +1477,7 @@ vbool64_t test_vmsgtu_vv_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1487,7 +1487,7 @@ vbool64_t test_vmsgtu_vx_u16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1497,7 +1497,7 @@ vbool32_t test_vmsgtu_vv_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1507,7 +1507,7 @@ vbool32_t test_vmsgtu_vx_u16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1517,7 +1517,7 @@ vbool16_t test_vmsgtu_vv_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vbool16_t test_vmsgtu_vx_u16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1537,7 +1537,7 @@ vbool8_t test_vmsgtu_vv_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1547,7 +1547,7 @@ vbool8_t test_vmsgtu_vx_u16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1557,7 +1557,7 @@ vbool4_t test_vmsgtu_vv_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1567,7 +1567,7 @@ vbool4_t test_vmsgtu_vx_u16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vv_u16m8_b2_m (vbool2_t 
mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1577,7 +1577,7 @@ vbool2_t test_vmsgtu_vv_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1587,7 +1587,7 @@ vbool2_t test_vmsgtu_vx_u16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1597,7 +1597,7 @@ vbool64_t test_vmsgtu_vv_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1607,7 +1607,7 @@ vbool64_t test_vmsgtu_vx_u32mf2_b64_m (vbool64_t mask, vbool64_t maskedoff, vuin // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vbool32_t test_vmsgtu_vv_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vbool32_t test_vmsgtu_vx_u32m1_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // 
CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vbool16_t test_vmsgtu_vv_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vbool16_t test_vmsgtu_vx_u32m2_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vbool8_t test_vmsgtu_vv_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vbool8_t test_vmsgtu_vx_u32m4_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1677,7 +1677,7 @@ vbool4_t test_vmsgtu_vv_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vbool4_t test_vmsgtu_vx_u32m8_b4_m (vbool4_t mask, vbool4_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vbool64_t test_vmsgtu_vv_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vbool64_t test_vmsgtu_vx_u64m1_b64_m (vbool64_t mask, vbool64_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1717,7 +1717,7 @@ vbool32_t test_vmsgtu_vv_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1727,7 +1727,7 @@ vbool32_t test_vmsgtu_vx_u64m2_b32_m (vbool32_t mask, vbool32_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgtu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1737,7 +1737,7 @@ vbool16_t test_vmsgtu_vv_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vbool16_t test_vmsgtu_vx_u64m4_b16_m (vbool16_t mask, vbool16_t maskedoff, vuint
//
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgtu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vbool8_t test_vmsgtu_vv_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
index d337d63a..d655d42 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -63,7 +63,7 @@ vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { return vmsif(op1, vl); }
//
// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@@ -74,7 +74,7 @@ vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
//
// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@@ -85,7 +85,7 @@ vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
//
// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@@ -96,7 +96,7 @@ vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
//
// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@@ -107,7 +107,7 @@ vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
//
// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@@ -118,7 +118,7 @@ vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
//
// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
@@ -129,7 +129,7 @@ vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
//
// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
index d185473..12b6637 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
@@ -902,7 +902,7 @@ vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
//
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64
[[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -913,7 +913,7 @@ vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -924,7 +924,7 @@ vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -935,7 +935,7 @@ vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -946,7 +946,7 @@ vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -957,7 +957,7 @@ vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -968,7 +968,7 @@ vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -979,7 +979,7 @@ vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -990,7 +990,7 @@ vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1001,7 +1001,7 @@ vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1012,7 +1012,7 @@ vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1023,7 +1023,7 @@ vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1034,7 +1034,7 @@ vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1045,7 +1045,7 @@ vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1056,7 +1056,7 @@ vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1068,7 +1068,7 @@ vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1079,7 +1079,7 @@ vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1091,7 +1091,7 @@ vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1102,7 +1102,7 @@ vbool32_t 
test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1113,7 +1113,7 @@ vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1124,7 +1124,7 @@ vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1135,7 +1135,7 @@ vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1146,7 +1146,7 @@ vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1157,7 +1157,7 @@ vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1168,7 +1168,7 @@ vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1179,7 +1179,7 @@ vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1190,7 +1190,7 @@ vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1202,7 +1202,7 @@ vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1213,7 +1213,7 @@ vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1224,7 +1224,7 @@ vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsle.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1235,7 +1235,7 @@ vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1246,7 +1246,7 @@ vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1257,7 +1257,7 @@ vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1268,7 +1268,7 @@ vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1279,7 +1279,7 @@ vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1290,7 +1290,7 @@ vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1301,7 +1301,7 @@ vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1312,7 +1312,7 @@ vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1323,7 +1323,7 @@ vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1334,7 +1334,7 @@ vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1345,7 +1345,7 @@ vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1356,7 +1356,7 @@ vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1367,7 +1367,7 @@ vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1378,7 +1378,7 @@ vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1389,7 +1389,7 @@ vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1401,7 +1401,7 @@ vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1412,7 +1412,7 @@ vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1424,7 +1424,7 @@ vbool32_t 
test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1435,7 +1435,7 @@ vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1447,7 +1447,7 @@ vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1458,7 +1458,7 @@ vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1469,7 +1469,7 @@ vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1480,7 +1480,7 @@ vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1491,7 +1491,7 @@ vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1502,7 +1502,7 @@ vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1513,7 +1513,7 @@ vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1524,7 +1524,7 @@ vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1535,7 +1535,7 @@ vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1546,7 +1546,7 @@ vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1558,7 +1558,7 @@ vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1570,7 +1570,7 @@ vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1582,7 +1582,7 @@ vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1594,7 +1594,7 @@ vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1606,7 +1606,7 @@ vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1617,7 +1617,7 @@ vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1629,7 +1629,7 @@ vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1640,7 +1640,7 @@ vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1652,7 +1652,7 @@ vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1663,7 +1663,7 @@ vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1675,7 +1675,7 @@ vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1686,7 +1686,7 @@ vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1698,7 +1698,7 @@ vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1710,7 +1710,7 @@ vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1722,7 +1722,7 @@ vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1733,7 +1733,7 @@ vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1745,7 +1745,7 @@ vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, 
vbool16_t maskedoff, @@ -1756,7 +1756,7 @@ vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1768,7 +1768,7 @@ vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1779,7 +1779,7 @@ vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1791,7 +1791,7 @@ vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1802,7 +1802,7 @@ vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1814,7 +1814,7 @@ vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1825,7 +1825,7 @@ vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1837,7 +1837,7 @@ vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1848,7 +1848,7 @@ vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1860,7 +1860,7 @@ vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1871,7 +1871,7 @@ vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1883,7 +1883,7 @@ vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c index 24a5b00..07b864c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c @@ -902,7 +902,7 @@ vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -913,7 +913,7 @@ vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -924,7 +924,7 @@ vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -935,7 +935,7 @@ vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -946,7 +946,7 @@ vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -957,7 +957,7 @@ vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -968,7 +968,7 @@ vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -979,7 +979,7 @@ vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -990,7 +990,7 @@ vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1001,7 +1001,7 @@ vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1012,7 +1012,7 @@ vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1023,7 +1023,7 @@ vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1034,7 +1034,7 @@ vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1045,7 +1045,7 @@ vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1056,7 +1056,7 @@ vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1068,7 +1068,7 @@ vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1079,7 +1079,7 @@ vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1091,7 +1091,7 @@ vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1102,7 +1102,7 @@ vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1113,7 +1113,7 @@ vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1124,7 +1124,7 @@ vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1135,7 +1135,7 @@ vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1146,7 +1146,7 @@ vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1157,7 +1157,7 @@ vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1168,7 +1168,7 @@ vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1179,7 +1179,7 @@ vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1190,7 +1190,7 @@ vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1202,7 +1202,7 @@ vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1213,7 +1213,7 @@ vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // 
CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1224,7 +1224,7 @@ vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1235,7 +1235,7 @@ vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1246,7 +1246,7 @@ vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1257,7 +1257,7 @@ vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1268,7 +1268,7 @@ vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, 
vbool8_t maskedoff, @@ -1279,7 +1279,7 @@ vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1290,7 +1290,7 @@ vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1301,7 +1301,7 @@ vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1312,7 +1312,7 @@ vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1323,7 +1323,7 @@ vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1334,7 +1334,7 @@ vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1345,7 +1345,7 @@ vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1356,7 +1356,7 @@ vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1367,7 +1367,7 @@ vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1378,7 +1378,7 @@ vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1389,7 +1389,7 @@ vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1401,7 +1401,7 @@ vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1412,7 +1412,7 @@ vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1424,7 +1424,7 @@ vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1435,7 +1435,7 @@ vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1447,7 +1447,7 @@ vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1458,7 +1458,7 @@ vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1469,7 +1469,7 @@ vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1480,7 +1480,7 @@ vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1491,7 +1491,7 @@ vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1502,7 +1502,7 @@ vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1513,7 +1513,7 @@ vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1524,7 +1524,7 @@ vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1535,7 +1535,7 @@ vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: 
@test_vmsltu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1546,7 +1546,7 @@ vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1558,7 +1558,7 @@ vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1570,7 +1570,7 @@ vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1582,7 +1582,7 @@ vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1594,7 +1594,7 @@ vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1606,7 +1606,7 @@ vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1617,7 +1617,7 @@ vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1629,7 +1629,7 @@ vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1640,7 +1640,7 @@ vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1652,7 +1652,7 @@ vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1663,7 +1663,7 @@ vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1675,7 +1675,7 @@ vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1686,7 +1686,7 @@ vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1698,7 +1698,7 @@ vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1710,7 +1710,7 @@ vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1722,7 +1722,7 @@ vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1733,7 +1733,7 @@ vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1745,7 +1745,7 @@ vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1756,7 +1756,7 @@ vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1768,7 +1768,7 @@ vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1779,7 +1779,7 @@ vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1791,7 +1791,7 @@ vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1802,7 +1802,7 @@ vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t 
maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1814,7 +1814,7 @@ vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1825,7 +1825,7 @@ vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1837,7 +1837,7 @@ vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1848,7 +1848,7 @@ vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1860,7 +1860,7 @@ vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1871,7 +1871,7 @@ vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1883,7 +1883,7 @@ vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c index a3fb1cd..61b2d97 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c @@ -893,7 +893,7 @@ vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -904,7 +904,7 @@ vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -915,7 +915,7 @@ vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -926,7 +926,7 @@ vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: 
@test_vmsne_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -937,7 +937,7 @@ vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -948,7 +948,7 @@ vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -959,7 +959,7 @@ vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -970,7 +970,7 @@ vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -981,7 +981,7 @@ vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -992,7 +992,7 @@ vbool4_t 
test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1003,7 +1003,7 @@ vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1014,7 +1014,7 @@ vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1025,7 +1025,7 @@ vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1036,7 +1036,7 @@ vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1047,7 +1047,7 @@ vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t 
mask, vbool64_t maskedoff, @@ -1059,7 +1059,7 @@ vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1070,7 +1070,7 @@ vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1082,7 +1082,7 @@ vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1093,7 +1093,7 @@ vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1104,7 +1104,7 @@ vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1115,7 +1115,7 @@ vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1126,7 +1126,7 @@ vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1137,7 +1137,7 @@ vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1148,7 +1148,7 @@ vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1159,7 +1159,7 @@ vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1170,7 +1170,7 @@ vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1181,7 +1181,7 @@ vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1193,7 +1193,7 @@ vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1204,7 +1204,7 @@ vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1215,7 +1215,7 @@ vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1226,7 +1226,7 @@ vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1237,7 +1237,7 @@ vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1248,7 +1248,7 @@ vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1259,7 +1259,7 @@ vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1270,7 +1270,7 @@ vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1281,7 +1281,7 @@ vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1292,7 +1292,7 @@ vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1303,7 +1303,7 @@ vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1314,7 +1314,7 @@ vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // 
CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1325,7 +1325,7 @@ vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1336,7 +1336,7 @@ vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1347,7 +1347,7 @@ vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1358,7 +1358,7 @@ vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1369,7 +1369,7 @@ vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, 
vbool8_t maskedoff, @@ -1380,7 +1380,7 @@ vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1392,7 +1392,7 @@ vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1403,7 +1403,7 @@ vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1415,7 +1415,7 @@ vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1426,7 +1426,7 @@ vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1438,7 +1438,7 @@ vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1449,7 +1449,7 @@ vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1460,7 +1460,7 @@ vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1471,7 +1471,7 @@ vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1482,7 +1482,7 @@ vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1493,7 +1493,7 @@ vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1504,7 +1504,7 @@ vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1515,7 +1515,7 @@ vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1526,7 +1526,7 @@ vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1537,7 +1537,7 @@ vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1549,7 +1549,7 @@ vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1561,7 +1561,7 @@ vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1573,7 +1573,7 @@ vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1585,7 +1585,7 @@ vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1597,7 +1597,7 @@ vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1608,7 +1608,7 @@ vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1619,7 +1619,7 @@ vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1630,7 +1630,7 @@ vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1641,7 +1641,7 @@ vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1652,7 +1652,7 @@ vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1663,7 +1663,7 @@ vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1674,7 +1674,7 @@ vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1686,7 +1686,7 @@ vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1698,7 +1698,7 @@ vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1710,7 +1710,7 @@ vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // 
// CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1721,7 +1721,7 @@ vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1733,7 +1733,7 @@ vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1744,7 +1744,7 @@ vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1755,7 +1755,7 @@ vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1766,7 +1766,7 @@ vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, 
vbool4_t maskedoff, @@ -1777,7 +1777,7 @@ vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1788,7 +1788,7 @@ vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1800,7 +1800,7 @@ vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1811,7 +1811,7 @@ vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1823,7 +1823,7 @@ vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1834,7 +1834,7 @@ vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1846,7 +1846,7 @@ vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1857,7 +1857,7 @@ vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1868,7 +1868,7 @@ vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c index 52f5f97..03cec0e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c @@ -63,7 +63,7 @@ vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { return vmsof(op1, vl); } // // CHECK-RV64-LABEL: @test_vmsof_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, @@ -74,7 +74,7 @@ vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, // // CHECK-RV64-LABEL: @test_vmsof_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, @@ -85,7 +85,7 @@ vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, // // CHECK-RV64-LABEL: @test_vmsof_m_b4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, @@ -96,7 +96,7 @@ vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, // // CHECK-RV64-LABEL: @test_vmsof_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, @@ -107,7 +107,7 @@ vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, // // CHECK-RV64-LABEL: @test_vmsof_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, @@ -118,7 +118,7 @@ vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, // // CHECK-RV64-LABEL: @test_vmsof_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, @@ -129,7 +129,7 @@ vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, // // CHECK-RV64-LABEL: @test_vmsof_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c index 024050f..435ffae 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c @@ -2207,7 +2207,7 @@ vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -2217,7 +2217,7 @@ vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -2227,7 +2227,7 @@ vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -2237,7 +2237,7 @@ vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -2247,7 +2247,7 @@ vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -2257,7 +2257,7 @@ vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -2267,7 +2267,7 @@ vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -2277,7 +2277,7 @@ vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -2287,7 +2287,7 @@ vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -2297,7 +2297,7 @@ vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -2307,7 +2307,7 @@ vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -2317,7 +2317,7 @@ vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -2327,7 +2327,7 @@ vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -2337,7 +2337,7 @@ vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -2347,7 +2347,7 @@ vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -2357,7 +2357,7 @@ vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -2367,7 +2367,7 @@ vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -2377,7 +2377,7 @@ vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m( // 
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -2387,7 +2387,7 @@ vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
//
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -2397,7 +2397,7 @@ vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -2407,7 +2407,7 @@ vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -2417,7 +2417,7 @@ vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
@@ -2427,7 +2427,7 @@ vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -2437,7 +2437,7 @@ vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -2447,7 +2447,7 @@ vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -2457,7 +2457,7 @@ vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
@@ -2467,7 +2467,7 @@ vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -2477,7 +2477,7 @@ vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
//
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -2487,7 +2487,7 @@ vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
//
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -2497,7 +2497,7 @@ vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -2507,7 +2507,7 @@ vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -2517,7 +2517,7 @@ vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -2527,7 +2527,7 @@ vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -2537,7 +2537,7 @@ vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -2547,7 +2547,7 @@ vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -2557,7 +2557,7 @@ vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -2567,7 +2567,7 @@ vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -2577,7 +2577,7 @@ vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
@@ -2587,7 +2587,7 @@ vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -2597,7 +2597,7 @@ vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
@@ -2607,7 +2607,7 @@ vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -2617,7 +2617,7 @@ vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
@@ -2627,7 +2627,7 @@ vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -2637,7 +2637,7 @@ vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
//
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
@@ -2647,7 +2647,7 @@ vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
//
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -2657,7 +2657,7 @@ vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
//
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -2667,7 +2667,7 @@ vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
//
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -2677,7 +2677,7 @@ vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
//
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -2687,7 +2687,7 @@ vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
//
// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -2697,7 +2697,7 @@ vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
//
// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -2707,7 +2707,7 @@ vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
//
// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -2717,7 +2717,7 @@ vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
//
// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -2727,7 +2727,7 @@ vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
//
// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -2737,7 +2737,7 @@ vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
//
// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -2747,7 +2747,7 @@ vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
//
// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -2757,7 +2757,7 @@ vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
//
// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -2767,7 +2767,7 @@ vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
//
// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -2777,7 +2777,7 @@ vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
//
// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -2787,7 +2787,7 @@ vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
//
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -2797,7 +2797,7 @@ vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
//
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -2807,7 +2807,7 @@ vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
//
// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -2817,7 +2817,7 @@ vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
//
// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -2827,7 +2827,7 @@ vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
//
// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -2837,7 +2837,7 @@ vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
//
// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -2847,7 +2847,7 @@ vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
//
// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -2857,7 +2857,7 @@ vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
//
// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -2867,7 +2867,7 @@ vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
//
// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -2877,7 +2877,7 @@ vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
//
// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -2887,7 +2887,7 @@ vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
//
// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -2897,7 +2897,7 @@ vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
//
// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -2907,7 +2907,7 @@ vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
//
// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2917,7 +2917,7 @@ vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
//
// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2927,7 +2927,7 @@ vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
//
// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -2937,7 +2937,7 @@ vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
//
// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -2947,7 +2947,7 @@ vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
//
// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -2957,7 +2957,7 @@ vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
//
// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -2967,7 +2967,7 @@ vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
//
// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -2977,7 +2977,7 @@ vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
//
// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -2987,7 +2987,7 @@ vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
//
// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -2997,7 +2997,7 @@ vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
//
// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -3007,7 +3007,7 @@ vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
//
// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -3017,7 +3017,7 @@ vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
//
// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -3027,7 +3027,7 @@ vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
//
// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -3037,7 +3037,7 @@ vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
//
// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -3047,7 +3047,7 @@ vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
//
// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -3057,7 +3057,7 @@ vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
//
// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -3067,7 +3067,7 @@ vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
//
// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -3077,7 +3077,7 @@ vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
//
// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -3087,7 +3087,7 @@ vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -3097,7 +3097,7 @@ vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -3107,7 +3107,7 @@ vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -3117,7 +3117,7 @@ vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -3127,7 +3127,7 @@ vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -3137,7 +3137,7 @@ vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -3147,7 +3147,7 @@ vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -3157,7 +3157,7 @@ vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -3167,7 +3167,7 @@ vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -3177,7 +3177,7 @@ vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -3187,7 +3187,7 @@ vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -3197,7 +3197,7 @@ vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -3207,7 +3207,7 @@ vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -3217,7 +3217,7 @@ vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -3227,7 +3227,7 @@ vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -3237,7 +3237,7 @@ vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -3247,7 +3247,7 @@ vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -3257,7 +3257,7 @@ vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -3267,7 +3267,7 @@ vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -3277,7 +3277,7 @@ vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
//
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -3287,7 +3287,7 @@ vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_
//
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1,
vint16m2_t op2, size_t vl) { @@ -3297,7 +3297,7 @@ vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -3307,7 +3307,7 @@ vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -3317,7 +3317,7 @@ vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -3327,7 +3327,7 @@ vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -3337,7 +3337,7 @@ vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -3347,7 +3347,7 @@ vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -3357,7 +3357,7 @@ vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -3367,7 +3367,7 @@ vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -3377,7 +3377,7 @@ vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -3387,7 +3387,7 @@ vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_ // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -3397,7 +3397,7 @@ vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -3407,7 +3407,7 @@ vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_ // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -3417,7 +3417,7 @@ vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -3427,7 +3427,7 @@ vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -3437,7 +3437,7 @@ vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -3447,7 +3447,7 @@ vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -3457,7 +3457,7 @@ vint64m1_t 
test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -3467,7 +3467,7 @@ vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_ // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -3477,7 +3477,7 @@ vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -3487,7 +3487,7 @@ vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_ // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -3497,7 +3497,7 @@ vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -3507,7 +3507,7 @@ vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_ // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -3517,7 +3517,7 @@ vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -3527,7 +3527,7 @@ vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -3537,7 +3537,7 @@ vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -3547,7 +3547,7 @@ vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -3557,7 +3557,7 @@ vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -3567,7 +3567,7 @@ vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -3577,7 +3577,7 @@ vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -3587,7 +3587,7 @@ vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -3597,7 +3597,7 @@ vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -3607,7 +3607,7 @@ vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -3617,7 +3617,7 @@ vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // // CHECK-RV64-LABEL: 
@test_vmulhu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -3627,7 +3627,7 @@ vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -3637,7 +3637,7 @@ vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -3647,7 +3647,7 @@ vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -3657,7 +3657,7 @@ vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -3667,7 +3667,7 @@ vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -3677,7 +3677,7 @@ vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -3687,7 +3687,7 @@ vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -3697,7 +3697,7 @@ vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -3707,7 +3707,7 @@ vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -3717,7 +3717,7 @@ vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -3727,7 +3727,7 @@ vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint1 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -3737,7 +3737,7 @@ vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -3747,7 +3747,7 @@ vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -3757,7 +3757,7 @@ vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -3767,7 +3767,7 @@ vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -3777,7 +3777,7 @@ vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16 
// // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -3787,7 +3787,7 @@ vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -3797,7 +3797,7 @@ vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -3807,7 +3807,7 @@ vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -3817,7 +3817,7 @@ vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -3827,7 +3827,7 @@ vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint3 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -3837,7 +3837,7 @@ vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -3847,7 +3847,7 @@ vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint3 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -3857,7 +3857,7 @@ vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -3867,7 +3867,7 @@ vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -3877,7 +3877,7 @@ vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -3887,7 +3887,7 @@ vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -3897,7 +3897,7 @@ vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -3907,7 +3907,7 @@ vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint6 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -3917,7 +3917,7 @@ vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -3927,7 +3927,7 @@ vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint6 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -3937,7 +3937,7 @@ vuint64m4_t 
test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -3947,7 +3947,7 @@ vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint6 // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -3957,7 +3957,7 @@ vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -3967,7 +3967,7 @@ vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -3977,7 +3977,7 @@ vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -3987,7 +3987,7 @@ vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -3997,7 +3997,7 @@ vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -4007,7 +4007,7 @@ vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -4017,7 +4017,7 @@ vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -4027,7 +4027,7 @@ vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -4037,7 +4037,7 @@ vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { @@ -4047,7 +4047,7 @@ vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t o // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -4057,7 +4057,7 @@ vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { @@ -4067,7 +4067,7 @@ vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t o // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -4077,7 +4077,7 @@ vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { @@ -4087,7 +4087,7 @@ vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t o // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -4097,7 +4097,7 @@ vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t 
mask, vint8m8_t maskedoff, vint8m8_t o
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
@@ -4107,7 +4107,7 @@ vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t o
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -4117,7 +4117,7 @@ vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -4127,7 +4127,7 @@ vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -4137,7 +4137,7 @@ vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -4147,7 +4147,7 @@ vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -4157,7 +4157,7 @@ vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -4167,7 +4167,7 @@ vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -4177,7 +4177,7 @@ vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -4187,7 +4187,7 @@ vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -4197,7 +4197,7 @@ vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -4207,7 +4207,7 @@ vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -4217,7 +4217,7 @@ vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
@@ -4227,7 +4227,7 @@ vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -4237,7 +4237,7 @@ vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -4247,7 +4247,7 @@ vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -4257,7 +4257,7 @@ vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -4267,7 +4267,7 @@ vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -4277,7 +4277,7 @@ vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -4287,7 +4287,7 @@ vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -4297,7 +4297,7 @@ vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
@@ -4307,7 +4307,7 @@ vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -4317,7 +4317,7 @@ vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
@@ -4327,7 +4327,7 @@ vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -4337,7 +4337,7 @@ vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
@@ -4347,7 +4347,7 @@ vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -4357,7 +4357,7 @@ vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
@@ -4367,7 +4367,7 @@ vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -4377,7 +4377,7 @@ vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
@@ -4387,7 +4387,7 @@ vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -4397,7 +4397,7 @@ vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8
 //
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
index fa08871..29c242a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
@@ -622,7 +622,7 @@ vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -634,7 +634,7 @@ vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -645,7 +645,7 @@ vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -657,7 +657,7 @@ vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -668,7 +668,7 @@ vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -680,7 +680,7 @@ vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -691,7 +691,7 @@ vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -702,7 +702,7 @@ vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -713,7 +713,7 @@ vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -724,7 +724,7 @@ vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -735,7 +735,7 @@ vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -746,7 +746,7 @@ vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -757,7 +757,7 @@ vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -769,7 +769,7 @@ vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -780,7 +780,7 @@ vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -792,7 +792,7 @@ vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -803,7 +803,7 @@ vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -815,7 +815,7 @@ vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -826,7 +826,7 @@ vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -838,7 +838,7 @@ vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -849,7 +849,7 @@ vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -861,7 +861,7 @@ vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -872,7 +872,7 @@ vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -884,7 +884,7 @@ vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -895,7 +895,7 @@ vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -907,7 +907,7 @@ vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -918,7 +918,7 @@ vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -930,7 +930,7 @@ vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -941,7 +941,7 @@ vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -953,7 +953,7 @@ vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -964,7 +964,7 @@ vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -976,7 +976,7 @@ vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -987,7 +987,7 @@ vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -999,7 +999,7 @@ vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -1010,7 +1010,7 @@ vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -1022,7 +1022,7 @@ vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -1033,7 +1033,7 @@ vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -1045,7 +1045,7 @@ vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -1056,7 +1056,7 @@ vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -1068,7 +1068,7 @@ vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -1079,7 +1079,7 @@ vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -1091,7 +1091,7 @@ vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -1102,7 +1102,7 @@ vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -1114,7 +1114,7 @@ vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -1126,7 +1126,7 @@ vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -1138,7 +1138,7 @@ vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -1150,7 +1150,7 @@ vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -1162,7 +1162,7 @@ vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -1173,7 +1173,7 @@ vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -1185,7 +1185,7 @@ vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -1196,7 +1196,7 @@ vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -1208,7 +1208,7 @@ vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -1219,7 +1219,7 @@ vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -1231,7 +1231,7 @@ vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -1243,7 +1243,7 @@ vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -1255,7 +1255,7 @@ vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -1266,7 +1266,7 @@ vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -1278,7 +1278,7 @@ vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -1289,7 +1289,7 @@ vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -1301,7 +1301,7 @@ vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
index 7523092..e7a9510 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
@@ -307,7 +307,7 @@ vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vncvt_x_x_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
@@ -317,7 +317,7 @@ vint8mf8_t test_vncvt_x_x_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint1
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vncvt_x_x_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
@@ -327,7 +327,7 @@ vint8mf4_t test_vncvt_x_x_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint1
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vncvt_x_x_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
@@ -337,7 +337,7 @@ vint8mf2_t test_vncvt_x_x_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint1
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vncvt_x_x_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
@@ -347,7 +347,7 @@ vint8m1_t test_vncvt_x_x_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vncvt_x_x_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
@@ -357,7 +357,7 @@ vint8m2_t test_vncvt_x_x_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vncvt_x_x_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
@@ -367,7 +367,7 @@ vint8m4_t test_vncvt_x_x_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vncvt_x_x_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
@@ -377,7 +377,7 @@ vuint8mf8_t test_vncvt_x_x_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vui
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vncvt_x_x_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
@@ -387,7 +387,7 @@ vuint8mf4_t test_vncvt_x_x_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vui
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vncvt_x_x_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
@@ -397,7 +397,7 @@ vuint8mf2_t test_vncvt_x_x_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vui
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vncvt_x_x_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
@@ -407,7 +407,7 @@ vuint8m1_t test_vncvt_x_x_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vncvt_x_x_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
@@ -417,7 +417,7 @@ vuint8m2_t test_vncvt_x_x_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vncvt_x_x_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
@@ -427,7 +427,7 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vncvt_x_x_w_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
@@ -437,7 +437,7 @@ vint16mf4_t test_vncvt_x_x_w_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vi
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vncvt_x_x_w_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
@@ -447,7 +447,7 @@ vint16mf2_t test_vncvt_x_x_w_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vi
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vncvt_x_x_w_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
@@ -457,7 +457,7 @@ vint16m1_t test_vncvt_x_x_w_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint3
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vncvt_x_x_w_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
@@ -467,7 +467,7 @@ vint16m2_t test_vncvt_x_x_w_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vncvt_x_x_w_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
@@ -477,7 +477,7 @@ vint16m4_t test_vncvt_x_x_w_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vncvt_x_x_w_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
@@ -487,7 +487,7 @@ vuint16mf4_t test_vncvt_x_x_w_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vncvt_x_x_w_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
@@ -497,7 +497,7 @@ vuint16mf2_t test_vncvt_x_x_w_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vncvt_x_x_w_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
@@ -507,7 +507,7 @@ vuint16m1_t test_vncvt_x_x_w_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vui
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vncvt_x_x_w_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
@@ -517,7 +517,7 @@ vuint16m2_t test_vncvt_x_x_w_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vncvt_x_x_w_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
@@ -527,7 +527,7 @@ vuint16m4_t test_vncvt_x_x_w_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vncvt_x_x_w_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
@@ -537,7 +537,7 @@ vint32mf2_t test_vncvt_x_x_w_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vi
 //
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
#[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vncvt_x_x_w_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { @@ -547,7 +547,7 @@ vint32m1_t test_vncvt_x_x_w_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint6 // // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vncvt_x_x_w_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { @@ -557,7 +557,7 @@ vint32m2_t test_vncvt_x_x_w_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint6 // // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vncvt_x_x_w_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { @@ -567,7 +567,7 @@ vint32m4_t test_vncvt_x_x_w_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64 // // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vncvt_x_x_w_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { @@ -577,7 +577,7 @@ vuint32mf2_t test_vncvt_x_x_w_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vncvt_x_x_w_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { @@ -587,7 +587,7 @@ vuint32m1_t test_vncvt_x_x_w_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vncvt_x_x_w_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { @@ 
-597,7 +597,7 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vncvt_x_x_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c index 2e11915..ed5ab71 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c @@ -228,7 +228,7 @@ vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vneg_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { @@ -238,7 +238,7 @@ vint8mf8_t test_vneg_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { @@ -248,7 +248,7 @@ vint8mf4_t test_vneg_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { @@ -258,7 +258,7 @@ vint8mf2_t test_vneg_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vneg_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { @@ -268,7 +268,7 @@ vint8m1_t test_vneg_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vneg_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { @@ -278,7 +278,7 @@ vint8m2_t test_vneg_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vneg_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { @@ -288,7 +288,7 @@ vint8m4_t test_vneg_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vneg_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { @@ -298,7 +298,7 @@ vint8m8_t test_vneg_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { @@ -308,7 +308,7 @@ vint16mf4_t test_vneg_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { @@ -318,7 +318,7 @@ vint16mf2_t test_vneg_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vneg_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { @@ -328,7 +328,7 @@ vint16m1_t 
test_vneg_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vneg_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { @@ -338,7 +338,7 @@ vint16m2_t test_vneg_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vneg_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { @@ -348,7 +348,7 @@ vint16m4_t test_vneg_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vneg_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { @@ -358,7 +358,7 @@ vint16m8_t test_vneg_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vneg_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { @@ -368,7 +368,7 @@ vint32mf2_t test_vneg_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vneg_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vneg_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { @@ -378,7 +378,7 @@ vint32m1_t test_vneg_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vneg_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vneg_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { @@ -388,7 +388,7 @@ vint32m2_t test_vneg_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vneg_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vneg_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { @@ -398,7 +398,7 @@ vint32m4_t test_vneg_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vneg_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vneg_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { @@ -408,7 +408,7 @@ vint32m8_t test_vneg_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vneg_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vneg_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { @@ -418,7 +418,7 @@ vint64m1_t test_vneg_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vneg_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vneg_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { @@ -428,7 +428,7 @@ vint64m2_t test_vneg_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vneg_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vneg_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { @@ -438,7 +438,7 @@ vint64m4_t test_vneg_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vneg_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], 
i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c index f8b12b2..1a4a68b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c @@ -887,7 +887,7 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -897,7 +897,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -907,7 +907,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -917,7 +917,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -927,7 +927,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -937,7 +937,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -947,7 +947,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -957,7 +957,7 @@ vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -967,7 +967,7 @@ vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m // // CHECK-RV64-LABEL: 
@test_vnmsac_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -997,7 +997,7 @@ vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -1007,7 +1007,7 @@ vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1017,7 +1017,7 @@ vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -1027,7 +1027,7 @@ vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1037,7 +1037,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -1047,7 +1047,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1057,7 +1057,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -1087,7 +1087,7 @@ vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1097,7 +1097,7 @@ vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -1107,7 +1107,7 @@ vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1127,7 +1127,7 @@ vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1137,7 +1137,7 @@ vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1147,7 +1147,7 @@ vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1157,7 +1157,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1177,7 +1177,7 @@ vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1187,7 +1187,7 @@ vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1197,7 +1197,7 @@ vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v // // CHECK-RV64-LABEL: 
@test_vnmsac_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1217,7 +1217,7 @@ vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1227,7 +1227,7 @@ vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1237,7 +1237,7 @@ vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1247,7 +1247,7 @@ vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v // // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v // // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1307,7 +1307,7 @@ vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v // // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, // // CHECK-RV64-LABEL: 
@test_vnmsac_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1377,7 +1377,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1387,7 +1387,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1397,7 +1397,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1407,7 +1407,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1417,7 +1417,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1427,7 +1427,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1447,7 +1447,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1457,7 +1457,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1467,7 +1467,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1477,7 +1477,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1487,7 +1487,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1497,7 +1497,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1517,7 +1517,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1537,7 +1537,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o // // CHECK-RV64-LABEL: 
@test_vnmsac_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1547,7 +1547,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1557,7 +1557,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1567,7 +1567,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1577,7 +1577,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1587,7 +1587,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1597,7 +1597,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1607,7 +1607,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vuint32m2_t 
test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1677,7 +1677,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1717,7 +1717,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1727,7 +1727,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1737,7 +1737,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1747,7 +1747,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1 // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t 
vl) { @@ -1757,7 +1757,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c index 2106b23..2e6c5e3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c @@ -887,7 +887,7 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -897,7 +897,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -907,7 +907,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -917,7 +917,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -927,7 +927,7 @@ vint8mf4_t 
test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -937,7 +937,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -947,7 +947,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -957,7 +957,7 @@ vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -967,7 +967,7 @@ vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -997,7 +997,7 @@ vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -1007,7 +1007,7 @@ vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1017,7 +1017,7 @@ vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -1027,7 +1027,7 @@ vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1037,7 +1037,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t // // 
CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -1047,7 +1047,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1057,7 +1057,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -1087,7 +1087,7 @@ vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1097,7 +1097,7 @@ vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -1107,7 +1107,7 @@ vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1127,7 +1127,7 @@ vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1137,7 +1137,7 @@ vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1147,7 +1147,7 @@ vint16m8_t 
test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1157,7 +1157,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1177,7 +1177,7 @@ vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1187,7 +1187,7 @@ vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1197,7 +1197,7 @@ vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1217,7 +1217,7 @@ vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1227,7 +1227,7 @@ vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1237,7 +1237,7 @@ vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1247,7 +1247,7 @@ vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ 
vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1307,7 +1307,7 @@ vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vuint8mf4_t 
test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1377,7 +1377,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1387,7 +1387,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1397,7 +1397,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1407,7 +1407,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1417,7 +1417,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1427,7 +1427,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -1447,7 +1447,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1457,7 +1457,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -1467,7 +1467,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1477,7 +1477,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, 
vuint16mf4_t acc, vuint16mf // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -1487,7 +1487,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1497,7 +1497,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1517,7 +1517,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1537,7 +1537,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -1547,7 +1547,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1557,7 +1557,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -1567,7 +1567,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1577,7 +1577,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -1587,7 
+1587,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1597,7 +1597,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1607,7 +1607,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1677,7 +1677,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, 
vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1717,7 +1717,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1727,7 +1727,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1737,7 +1737,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1747,7 +1747,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1 // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
 //
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
index 7a08334..391fc5c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
@@ -448,7 +448,7 @@ vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vnot_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
@@ -458,7 +458,7 @@ vint8mf8_t test_vnot_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vnot_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
@@ -468,7 +468,7 @@ vint8mf4_t test_vnot_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vnot_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vnot_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
@@ -478,7 +478,7 @@ vint8mf2_t test_vnot_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vnot_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { @@ -488,7 +488,7 @@ vint8m1_t test_vnot_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vnot_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { @@ -498,7 +498,7 @@ vint8m2_t test_vnot_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vnot_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { @@ -508,7 +508,7 @@ vint8m4_t test_vnot_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vnot_v_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { @@ -518,7 +518,7 @@ vint8m8_t test_vnot_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vnot_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { @@ -528,7 +528,7 @@ vint16mf4_t test_vnot_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vnot_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { @@ -538,7 +538,7 @@ vint16mf2_t test_vnot_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vnot_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { @@ -548,7 +548,7 @@ vint16m1_t test_vnot_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vnot_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { @@ -558,7 +558,7 @@ vint16m2_t test_vnot_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vnot_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { @@ -568,7 +568,7 @@ vint16m4_t test_vnot_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vnot_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { @@ -578,7 +578,7 @@ vint16m8_t test_vnot_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vnot_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { @@ -588,7 +588,7 @@ vint32mf2_t test_vnot_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vnot_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { @@ -598,7 +598,7 @@ vint32m1_t 
test_vnot_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vnot_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { @@ -608,7 +608,7 @@ vint32m2_t test_vnot_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vnot_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { @@ -618,7 +618,7 @@ vint32m4_t test_vnot_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vnot_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { @@ -628,7 +628,7 @@ vint32m8_t test_vnot_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vnot_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { @@ -638,7 +638,7 @@ vint64m1_t test_vnot_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vnot_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { @@ -648,7 +648,7 @@ vint64m2_t test_vnot_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vnot_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { @@ -658,7 +658,7 @@ vint64m4_t test_vnot_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vnot_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { @@ -668,7 +668,7 @@ vint64m8_t test_vnot_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vnot_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { @@ -678,7 +678,7 @@ vuint8mf8_t test_vnot_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vnot_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { @@ -688,7 +688,7 @@ vuint8mf4_t test_vnot_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vnot_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { @@ -698,7 +698,7 @@ vuint8mf2_t test_vnot_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vnot_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { @@ -708,7 +708,7 @@ vuint8m1_t test_vnot_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vnot_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { @@ -718,7 +718,7 @@ vuint8m2_t test_vnot_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vnot_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { @@ -728,7 +728,7 @@ vuint8m4_t test_vnot_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vnot_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { @@ -738,7 +738,7 @@ vuint8m8_t test_vnot_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vnot_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { @@ -748,7 +748,7 @@ vuint16mf4_t test_vnot_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vnot_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { @@ -758,7 +758,7 @@ vuint16mf2_t test_vnot_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vnot_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { @@ -768,7 +768,7 @@ vuint16m1_t test_vnot_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vnot_v_u16m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { @@ -778,7 +778,7 @@ vuint16m2_t test_vnot_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vnot_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { @@ -788,7 +788,7 @@ vuint16m4_t test_vnot_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vnot_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { @@ -798,7 +798,7 @@ vuint16m8_t test_vnot_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vnot_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { @@ -808,7 +808,7 @@ vuint32mf2_t test_vnot_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vnot_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { @@ -818,7 +818,7 @@ vuint32m1_t test_vnot_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vnot_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, 
vuint32m2_t op1, size_t vl) { @@ -828,7 +828,7 @@ vuint32m2_t test_vnot_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vnot_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { @@ -838,7 +838,7 @@ vuint32m4_t test_vnot_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vnot_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { @@ -848,7 +848,7 @@ vuint32m8_t test_vnot_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vnot_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { @@ -858,7 +858,7 @@ vuint64m1_t test_vnot_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vnot_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { @@ -868,7 +868,7 @@ vuint64m2_t test_vnot_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vnot_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { @@ -878,7 +878,7 @@ vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vnot_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
index a90d69e..e76b076 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
@@ -307,7 +307,7 @@ vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -317,7 +317,7 @@ vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
@@ -327,7 +327,7 @@ vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -337,7 +337,7 @@ vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
@@ -347,7 +347,7 @@ vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]],
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -357,7 +357,7 @@ vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_ // // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { @@ -367,7 +367,7 @@ vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_ // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -377,7 +377,7 @@ vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { @@ -387,7 +387,7 @@ vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -397,7 +397,7 @@ vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { @@ -407,7 +407,7 @@ vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -417,7 +417,7 @@ vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { @@ -427,7 +427,7 @@ vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op // // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -437,7 +437,7 @@ vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { @@ -447,7 +447,7 @@ vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, 
 vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -457,7 +457,7 @@ vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
@@ -467,7 +467,7 @@ vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -477,7 +477,7 @@ vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
@@ -487,7 +487,7 @@ vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -497,7 +497,7 @@ vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
@@ -507,7 +507,7 @@ vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -517,7 +517,7 @@ vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
@@ -527,7 +527,7 @@ vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -537,7 +537,7 @@ vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
@@ -547,7 +547,7 @@ vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -557,7 +557,7 @@ vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
@@ -567,7 +567,7 @@ vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -577,7 +577,7 @@ vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
@@ -587,7 +587,7 @@ vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_
 //
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -597,7 +597,7 @@ vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
index 12e0482..e478bec 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
@@ -307,7 +307,7 @@ vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift,
 size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
@@ -317,7 +317,7 @@ vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -327,7 +327,7 @@ vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
@@ -337,7 +337,7 @@ vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -347,7 +347,7 @@ vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
@@ -357,7 +357,7 @@ vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
@@ -367,7 +367,7 @@ vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
@@ -377,7 +377,7 @@ vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
@@ -387,7 +387,7 @@ vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
@@ -397,7 +397,7 @@ vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
@@ -407,7 +407,7 @@ vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
@@ -417,7 +417,7 @@ vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
@@ -427,7 +427,7 @@ vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
@@ -437,7 +437,7 @@ vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -447,7 +447,7 @@ vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
@@ -457,7 +457,7 @@ vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
@@ -467,7 +467,7 @@ vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -477,7 +477,7 @@ vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
@@ -487,7 +487,7 @@ vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
@@ -497,7 +497,7 @@ vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
@@ -507,7 +507,7 @@ vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
@@ -517,7 +517,7 @@ vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
@@ -527,7 +527,7 @@ vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
@@ -537,7 +537,7 @@ vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
@@ -547,7 +547,7 @@ vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
@@ -557,7 +557,7 @@ vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
@@ -567,7 +567,7 @@ vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
@@ -577,7 +577,7 @@ vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
@@ -587,7 +587,7 @@ vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
@@ -597,7 +597,7 @@ vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
index b7dc8c8..6a84e87 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
@@ -887,7 +887,7 @@ vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1,
 vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -937,7 +937,7 @@ vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -947,7 +947,7 @@ vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -957,7 +957,7 @@ vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -967,7 +967,7 @@ vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -997,7 +997,7 @@ vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -1007,7 +1007,7 @@ vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -1017,7 +1017,7 @@ vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -1027,7 +1027,7 @@ vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1037,7 +1037,7 @@ vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -1047,7 +1047,7 @@ vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1057,7 +1057,7 @@ vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -1087,7 +1087,7 @@ vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -1097,7 +1097,7 @@ vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
@@ -1107,7 +1107,7 @@ vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1127,7 +1127,7 @@ vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1137,7 +1137,7 @@ vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1147,7 +1147,7 @@ vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1157,7 +1157,7 @@ vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1177,7 +1177,7 @@ vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1187,7 +1187,7 @@ vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1197,7 +1197,7 @@ vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1217,7 +1217,7 @@ vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1227,7 +1227,7 @@ vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1237,7 +1237,7 @@ vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1247,7 +1247,7 @@ vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
@@ -1267,7 +1267,7 @@ vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1277,7 +1277,7 @@ vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
@@ -1287,7 +1287,7 @@ vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1297,7 +1297,7 @@ vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
@@ -1307,7 +1307,7 @@ vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1317,7 +1317,7 @@ vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
@@ -1327,7 +1327,7 @@ vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t o
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1337,7 +1337,7 @@ vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8
 //
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1357,7 +1357,7 @@ vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4
 //
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1377,7 +1377,7 @@ vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2
 //
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1397,7 +1397,7 @@ vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op
 //
 // CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1407,7 +1407,7 @@ vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1417,7 +1417,7 @@ vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op
 //
 // CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1427,7 +1427,7 @@ vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op
 //
 // CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1447,7 +1447,7 @@ vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1457,7 +1457,7 @@ vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op
 //
 // CHECK-RV64-LABEL: @test_vor_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1467,7 +1467,7 @@ vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1477,7 +1477,7 @@ vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint1
 //
 // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1487,7 +1487,7 @@ vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint1
 //
 // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1497,7 +1497,7 @@ vuint16mf2_t
test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint1 // // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint1 // // CHECK-RV64-LABEL: @test_vor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1517,7 +1517,7 @@ vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1 // // CHECK-RV64-LABEL: @test_vor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1 // // CHECK-RV64-LABEL: @test_vor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1537,7 +1537,7 @@ vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_ // // CHECK-RV64-LABEL: @test_vor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1547,7 +1547,7 @@ vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_ // // CHECK-RV64-LABEL: @test_vor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1557,7 +1557,7 @@ vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_ // // CHECK-RV64-LABEL: @test_vor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1567,7 +1567,7 @@ vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_ // // CHECK-RV64-LABEL: @test_vor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1577,7 +1577,7 @@ vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_ // // CHECK-RV64-LABEL: @test_vor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1587,7 +1587,7 @@ vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_ // // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1597,7 +1597,7 @@ vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint3 // // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1607,7 +1607,7 @@ vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint3 // // CHECK-RV64-LABEL: @test_vor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1 // // CHECK-RV64-LABEL: @test_vor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1 // // CHECK-RV64-LABEL: @test_vor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2 // // CHECK-RV64-LABEL: @test_vor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2 // // CHECK-RV64-LABEL: @test_vor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_ // // CHECK-RV64-LABEL: @test_vor_vx_u32m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_ // // CHECK-RV64-LABEL: @test_vor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1677,7 +1677,7 @@ vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_ // // CHECK-RV64-LABEL: @test_vor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_ // // CHECK-RV64-LABEL: @test_vor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1 // // CHECK-RV64-LABEL: @test_vor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1 // // CHECK-RV64-LABEL: @test_vor_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1717,7 +1717,7 @@ vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2 // // CHECK-RV64-LABEL: @test_vor_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1727,7 +1727,7 @@ vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2 // // CHECK-RV64-LABEL: @test_vor_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1737,7 +1737,7 @@ vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4 // // CHECK-RV64-LABEL: @test_vor_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1747,7 +1747,7 @@ vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4 // // CHECK-RV64-LABEL: @test_vor_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1757,7 +1757,7 @@ vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_ // // CHECK-RV64-LABEL: @test_vor_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c index f64f4f2..4a7d9cb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c @@ -77,7 +77,7 @@ unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vpopc_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { @@ -87,7 +87,7 @@ unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vpopc_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { @@ -97,7 +97,7 @@ unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vpopc_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { @@ -107,7 +107,7 @@ unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vpopc_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { @@ -117,7 +117,7 @@ unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vpopc_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { @@ -127,7 +127,7 @@ unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vpopc_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { @@ -137,7 +137,7 @@ unsigned long 
test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { // // CHECK-RV64-LABEL: @test_vpopc_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c index cc070db6..6aa7874 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c @@ -491,7 +491,7 @@ vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, @@ -503,7 +503,7 @@ vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, @@ -515,7 +515,7 @@ vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, @@ -527,7 +527,7 @@ vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, @@ -539,7 +539,7 @@ vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, @@ -551,7 +551,7 @@ vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, @@ -563,7 +563,7 @@ vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, @@ -575,7 +575,7 @@ vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -587,7 +587,7 @@ vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -599,7 +599,7 @@ vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -611,7 +611,7 @@ vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -623,7 +623,7 @@ vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, @@ -635,7 +635,7 @@ vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, @@ -647,7 +647,7 @@ vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, @@ -659,7 +659,7 @@ vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -671,7 +671,7 @@ vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, 
vint32m1_t dst, @@ -683,7 +683,7 @@ vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, @@ -695,7 +695,7 @@ vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, @@ -707,7 +707,7 @@ vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -719,7 +719,7 @@ vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, @@ -731,7 +731,7 @@ vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, @@ -743,7 +743,7 @@ vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, @@ -755,7 +755,7 @@ vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, @@ -767,7 +767,7 @@ vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, @@ -779,7 +779,7 @@ vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, @@ -791,7 +791,7 @@ vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -803,7 +803,7 @@ vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, @@ -815,7 +815,7 @@ vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, @@ -827,7 +827,7 @@ vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, @@ -839,7 +839,7 @@ vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, @@ -851,7 +851,7 @@ vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, @@ -863,7 +863,7 @@ vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -875,7 +875,7 @@ vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, @@ -887,7 +887,7 @@ vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: 
@test_vredand_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, @@ -899,7 +899,7 @@ vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, @@ -911,7 +911,7 @@ vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, @@ -923,7 +923,7 @@ vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -935,7 +935,7 @@ vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, @@ -947,7 +947,7 @@ vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
 vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
@@ -959,7 +959,7 @@ vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
@@ -971,7 +971,7 @@ vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -983,7 +983,7 @@ vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
@@ -995,7 +995,7 @@ vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
@@ -1007,7 +1007,7 @@ vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
index aff604a..ed1ae50 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -491,7 +491,7 @@ vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
@@ -503,7 +503,7 @@ vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
@@ -515,7 +515,7 @@ vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
@@ -527,7 +527,7 @@ vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
@@ -539,7 +539,7 @@ vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
@@ -551,7 +551,7 @@ vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
@@ -563,7 +563,7 @@ vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
@@ -575,7 +575,7 @@ vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
@@ -587,7 +587,7 @@ vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
@@ -599,7 +599,7 @@ vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -611,7 +611,7 @@ vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
@@ -623,7 +623,7 @@ vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
@@ -635,7 +635,7 @@ vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
@@ -647,7 +647,7 @@ vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
@@ -659,7 +659,7 @@ vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -671,7 +671,7 @@ vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
@@ -683,7 +683,7 @@ vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
@@ -695,7 +695,7 @@ vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
@@ -707,7 +707,7 @@ vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -719,7 +719,7 @@ vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
@@ -731,7 +731,7 @@ vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
@@ -743,7 +743,7 @@ vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
@@ -755,7 +755,7 @@ vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
@@ -767,7 +767,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
@@ -779,7 +779,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
@@ -791,7 +791,7 @@ vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -803,7 +803,7 @@ vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
@@ -815,7 +815,7 @@ vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
@@ -827,7 +827,7 @@ vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
@@ -839,7 +839,7 @@ vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
@@ -851,7 +851,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
@@ -863,7 +863,7 @@ vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -875,7 +875,7 @@ vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
@@ -887,7 +887,7 @@ vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
@@ -899,7 +899,7 @@ vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
@@ -911,7 +911,7 @@ vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
@@ -923,7 +923,7 @@ vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -935,7 +935,7 @@ vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
@@ -947,7 +947,7 @@ vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
@@ -959,7 +959,7 @@ vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
@@ -971,7 +971,7 @@ vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -983,7 +983,7 @@ vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
@@ -995,7 +995,7 @@ vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
@@ -1007,7 +1007,7 @@ vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
index 19d8456..bdf2205 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
@@ -491,7 +491,7 @@ vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
@@ -503,7 +503,7 @@ vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
@@ -515,7 +515,7 @@ vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
@@ -527,7 +527,7 @@ vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
@@ -539,7 +539,7 @@ vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
@@ -551,7 +551,7 @@ vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
@@ -563,7 +563,7 @@ vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
@@ -575,7 +575,7 @@ vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
@@ -587,7 +587,7 @@ vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
@@ -599,7 +599,7 @@ vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -611,7 +611,7 @@ vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
@@ -623,7 +623,7 @@ vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
@@ -635,7 +635,7 @@ vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
@@ -647,7 +647,7 @@ vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
@@ -659,7 +659,7 @@ vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -671,7 +671,7 @@ vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
@@ -683,7 +683,7 @@ vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
@@ -695,7 +695,7 @@ vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
@@ -707,7 +707,7 @@ vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -719,7 +719,7 @@ vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
@@ -731,7 +731,7 @@ vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
@@ -743,7 +743,7 @@ vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
@@ -755,7 +755,7 @@ vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
@@ -767,7 +767,7 @@ vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
@@ -779,7 +779,7 @@ vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
@@ -791,7 +791,7 @@ vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -803,7 +803,7 @@ vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
@@ -815,7 +815,7 @@ vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
@@ -827,7 +827,7 @@ vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
@@ -839,7 +839,7 @@ vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
@@ -851,7 +851,7 @@ vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
@@ -863,7 +863,7 @@ vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -875,7 +875,7 @@ vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
@@ -887,7 +887,7 @@ vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
@@ -899,7 +899,7 @@ vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
@@ -911,7 +911,7 @@ vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
@@ -923,7 +923,7 @@ vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -935,7 +935,7 @@ vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
@@ -947,7 +947,7 @@ vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
@@ -959,7 +959,7 @@ vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
@@ -971,7 +971,7 @@ vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -983,7 +983,7 @@ vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
@@ -995,7 +995,7 @@ vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
@@ -1007,7 +1007,7 @@ vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
index 4b6c8c1..60c303b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
@@ -491,7 +491,7 @@ vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
@@ -503,7 +503,7 @@ vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
@@ -515,7 +515,7 @@ vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
@@ -527,7 +527,7 @@ vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
@@ -539,7 +539,7 @@ vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
@@ -551,7 +551,7 @@ vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
@@ -563,7 +563,7 @@ vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
@@ -575,7 +575,7 @@ vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
@@ -587,7 +587,7 @@ vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
@@ -599,7 +599,7 @@ vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -611,7 +611,7 @@ vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
@@ -623,7 +623,7 @@ vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
@@ -635,7 +635,7 @@ vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
@@ -647,7 +647,7 @@ vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
@@ -659,7 +659,7 @@ vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -671,7 +671,7 @@ vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
@@ -683,7 +683,7 @@ vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
@@ -695,7 +695,7 @@ vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
@@ -707,7 +707,7 @@ vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -719,7 +719,7 @@ vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
@@ -731,7 +731,7 @@ vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
@@ -743,7 +743,7 @@ vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
@@ -755,7 +755,7 @@ vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
@@ -767,7 +767,7 @@ vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
@@ -779,7 +779,7 @@ vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
@@ -791,7 +791,7 @@ vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]],
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -803,7 +803,7 @@ vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, @@ -815,7 +815,7 @@ vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, @@ -827,7 +827,7 @@ vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, @@ -839,7 +839,7 @@ vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, @@ -851,7 +851,7 @@ vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, @@ -863,7 +863,7 @@ vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -875,7 +875,7 @@ vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, @@ -887,7 +887,7 @@ vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, @@ -899,7 +899,7 @@ vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, @@ -911,7 +911,7 @@ vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, @@ -923,7 +923,7 @@ vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -935,7 +935,7 @@ vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, @@ -947,7 +947,7 @@ vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, @@ -959,7 +959,7 @@ vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, @@ -971,7 +971,7 @@ vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -983,7 +983,7 @@ vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, @@ -995,7 +995,7 @@ vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t 
dst, @@ -1007,7 +1007,7 @@ vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c index 72e3a09..a770eb9 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c @@ -491,7 +491,7 @@ vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, @@ -503,7 +503,7 @@ vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, @@ -515,7 +515,7 @@ vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, @@ -527,7 +527,7 @@ vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, @@ -539,7 +539,7 @@ vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, @@ -551,7 +551,7 @@ vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, @@ -563,7 +563,7 @@ vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, @@ -575,7 +575,7 @@ vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -587,7 +587,7 @@ vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -599,7 +599,7 @@ vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -611,7 +611,7 @@ vint16m1_t 
test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -623,7 +623,7 @@ vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, @@ -635,7 +635,7 @@ vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, @@ -647,7 +647,7 @@ vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, @@ -659,7 +659,7 @@ vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -671,7 +671,7 @@ vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, @@ -683,7 +683,7 @@ vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, @@ -695,7 +695,7 @@ vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, @@ -707,7 +707,7 @@ vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -719,7 +719,7 @@ vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, @@ -731,7 +731,7 @@ vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, @@ -743,7 +743,7 @@ vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, @@ -755,7 +755,7 @@ vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, @@ -767,7 +767,7 @@ vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, @@ -779,7 +779,7 @@ vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, @@ -791,7 +791,7 @@ vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -803,7 +803,7 @@ vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, @@ -815,7 +815,7 @@ vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, @@ -827,7 +827,7 @@ vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, @@ -839,7 +839,7 @@ vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, @@ -851,7 +851,7 @@ vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, @@ -863,7 +863,7 @@ vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -875,7 +875,7 @@ vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, @@ -887,7 
+887,7 @@ vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, @@ -899,7 +899,7 @@ vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, @@ -911,7 +911,7 @@ vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, @@ -923,7 +923,7 @@ vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -935,7 +935,7 @@ vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, @@ -947,7 +947,7 @@ vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, @@ -959,7 +959,7 @@ vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, @@ -971,7 +971,7 @@ vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -983,7 +983,7 @@ vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, @@ -995,7 +995,7 @@ vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, @@ -1007,7 +1007,7 @@ vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c index 1773411..9ad3e71 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c @@ -491,7 +491,7 @@ vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, @@ -503,7 +503,7 @@ vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, @@ -515,7 +515,7 @@ vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, @@ -527,7 +527,7 @@ vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, @@ -539,7 +539,7 @@ vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, @@ -551,7 +551,7 @@ vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, @@ -563,7 +563,7 @@ vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, @@ -575,7 +575,7 @@ vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -587,7 +587,7 @@ vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -599,7 +599,7 @@ vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -611,7 +611,7 @@ vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -623,7 +623,7 @@ vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
@@ -635,7 +635,7 @@ vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
@@ -647,7 +647,7 @@ vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
@@ -659,7 +659,7 @@ vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -671,7 +671,7 @@ vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
@@ -683,7 +683,7 @@ vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
@@ -695,7 +695,7 @@ vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
@@ -707,7 +707,7 @@ vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -719,7 +719,7 @@ vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
@@ -731,7 +731,7 @@ vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
@@ -743,7 +743,7 @@ vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
@@ -755,7 +755,7 @@ vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
@@ -767,7 +767,7 @@ vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
@@ -779,7 +779,7 @@ vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
@@ -791,7 +791,7 @@ vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -803,7 +803,7 @@ vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
@@ -815,7 +815,7 @@ vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
@@ -827,7 +827,7 @@ vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
@@ -839,7 +839,7 @@ vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
@@ -851,7 +851,7 @@ vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
@@ -863,7 +863,7 @@ vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -875,7 +875,7 @@ vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
@@ -887,7 +887,7 @@ vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
@@ -899,7 +899,7 @@ vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
@@ -911,7 +911,7 @@ vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
@@ -923,7 +923,7 @@ vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -935,7 +935,7 @@ vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
@@ -947,7 +947,7 @@ vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
@@ -959,7 +959,7 @@ vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
@@ -971,7 +971,7 @@ vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -983,7 +983,7 @@ vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
@@ -995,7 +995,7 @@ vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
@@ -1007,7 +1007,7 @@ vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
index 2571b6b..d791364 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -887,7 +887,7 @@ vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -937,7 +937,7 @@ vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -947,7 +947,7 @@ vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -957,7 +957,7 @@ vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -967,7 +967,7 @@ vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -997,7 +997,7 @@ vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -1007,7 +1007,7 @@ vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -1017,7 +1017,7 @@ vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -1027,7 +1027,7 @@ vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1037,7 +1037,7 @@ vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -1047,7 +1047,7 @@ vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1057,7 +1057,7 @@ vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -1087,7 +1087,7 @@ vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -1097,7 +1097,7 @@ vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
@@ -1107,7 +1107,7 @@ vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1127,7 +1127,7 @@ vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1137,7 +1137,7 @@ vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1147,7 +1147,7 @@ vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1157,7 +1157,7 @@ vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1177,7 +1177,7 @@ vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1187,7 +1187,7 @@ vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1197,7 +1197,7 @@ vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1207,7 +1207,7 @@ vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1217,7 +1217,7 @@ vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1227,7 +1227,7 @@ vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1237,7 +1237,7 @@ vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1247,7 +1247,7 @@ vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
@@ -1267,7 +1267,7 @@ vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1277,7 +1277,7 @@ vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
@@ -1287,7 +1287,7 @@ vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1297,7 +1297,7 @@ vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
@@ -1307,7 +1307,7 @@ vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1317,7 +1317,7 @@ vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
@@ -1327,7 +1327,7 @@ vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1337,7 +1337,7 @@ vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1357,7 +1357,7 @@ vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1377,7 +1377,7 @@ vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1397,7 +1397,7 @@ vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1407,7 +1407,7 @@ vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1417,7 +1417,7 @@ vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1427,7 +1427,7 @@ vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1447,7 +1447,7 @@ vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1457,7 +1457,7 @@ vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -1467,7 +1467,7 @@ vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1477,7 +1477,7 @@ vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1487,7 +1487,7 @@ vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1497,7 +1497,7 @@ vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1507,7 +1507,7 @@ vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1517,7 +1517,7 @@ vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1537,7 +1537,7 @@ vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1547,7 +1547,7 @@ vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1557,7 +1557,7 @@ vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1567,7 +1567,7 @@ vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1577,7 +1577,7 @@ vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -1587,7 +1587,7 @@ vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1597,7 +1597,7 @@ vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1607,7 +1607,7 @@ vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1627,7 +1627,7 @@ vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1637,7 +1637,7 @@ vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1647,7 +1647,7 @@ vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1657,7 +1657,7 @@ vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1667,7 +1667,7 @@ vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1677,7 +1677,7 @@ vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1687,7 +1687,7 @@ vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1697,7 +1697,7 @@ vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL:
@test_vremu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1717,7 +1717,7 @@ vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1727,7 +1727,7 @@ vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1737,7 +1737,7 @@ vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1747,7 +1747,7 @@ vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64 // // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1757,7 +1757,7 @@ vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
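The vremu.c expectations above all exercise the same C-level convention: for a masked overloaded intrinsic the mask is passed first, then maskedoff, then the operands, then vl. A minimal caller sketch mirroring the test signatures follows; the wrapper name remu_masked is illustrative only and not part of the patch:

  #include <riscv_vector.h>

  // Masked unsigned remainder, using the C operand order the tests check:
  // (mask, maskedoff, op1, op2, vl).
  vuint16m1_t remu_masked(vbool16_t mask, vuint16m1_t maskedoff,
                          vuint16m1_t op1, uint16_t op2, size_t vl) {
    // The overloaded name resolves to the type-specific vremu_vx_u16m1_m.
    return vremu(mask, maskedoff, op1, op2, vl);
  }
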
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
index d536156..04bfd70 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
@@ -1675,7 +1675,7 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -1687,7 +1687,7 @@ vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -1698,7 +1698,7 @@ vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -1710,7 +1710,7 @@ vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -1721,7 +1721,7 @@ vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -1733,7 +1733,7 @@ vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -1744,7 +1744,7 @@ vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -1755,7 +1755,7 @@ vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -1766,7 +1766,7 @@ vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -1777,7 +1777,7 @@ vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -1788,7 +1788,7 @@ vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -1799,7 +1799,7 @@ vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -1810,7 +1810,7 @@ vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -1821,7 +1821,7 @@ vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -1832,7 +1832,7 @@ vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1844,7 +1844,7 @@ vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1856,7 +1856,7 @@ vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1868,7 +1868,7 @@ vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1880,7 +1880,7 @@ vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1892,7 +1892,7 @@ vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1903,7 +1903,7 @@ vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1915,7 +1915,7 @@ vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1926,7 +1926,7 @@ vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1938,7 +1938,7 @@ vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1949,7 +1949,7 @@ vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1961,7 +1961,7 @@ vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1972,7 +1972,7 @@ vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1984,7 +1984,7 @@ vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1996,7 +1996,7 @@ vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -2008,7 +2008,7 @@ vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -2019,7 +2019,7 @@ vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -2031,7 +2031,7 @@ vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -2042,7 +2042,7 @@ vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -2054,7 +2054,7 @@ vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -2065,7 +2065,7 @@ vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -2077,7 +2077,7 @@ vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -2088,7 +2088,7 @@ vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -2100,7 +2100,7 @@ vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -2111,7 +2111,7 @@ vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -2123,7 +2123,7 @@ vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -2134,7 +2134,7 @@ vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -2146,7 +2146,7 @@ vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -2157,7 +2157,7 @@ vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -2169,7 +2169,7 @@ vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -2180,7 +2180,7 @@ vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -2192,7 +2192,7 @@ vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -2203,7 +2203,7 @@ vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -2215,7 +2215,7 @@ vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -2226,7 +2226,7 @@ vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -2238,7 +2238,7 @@ vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -2249,7 +2249,7 @@ vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -2261,7 +2261,7 @@ vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -2272,7 +2272,7 @@ vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -2284,7 +2284,7 @@ vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -2295,7 +2295,7 @@ vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -2307,7 +2307,7 @@ vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -2318,7 +2318,7 @@ vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -2330,7 +2330,7 @@ vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -2341,7 +2341,7 @@ vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -2353,7 +2353,7 @@ vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -2365,7 +2365,7 @@ vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -2377,7 +2377,7 @@ vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -2389,7 +2389,7 @@ vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2401,7 +2401,7 @@ vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2412,7 +2412,7 @@ vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2424,7 +2424,7 @@ vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2435,7 +2435,7 @@ vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2447,7 +2447,7 @@ vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2458,7 +2458,7 @@ vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2470,7 +2470,7 @@ vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2481,7 +2481,7 @@ vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2493,7 +2493,7 @@ vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2505,7 +2505,7 @@ vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -2517,7 +2517,7 @@ vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -2528,7 +2528,7 @@ vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -2540,7 +2540,7 @@ vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -2551,7 +2551,7 @@ vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -2563,7 +2563,7 @@ vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -2574,7 +2574,7 @@ vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -2586,7 +2586,7 @@ vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -2597,7 +2597,7 @@ vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -2609,7 +2609,7 @@ vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -2620,7 +2620,7 @@ vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -2632,7 +2632,7 @@ vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -2643,7 +2643,7 @@ vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2655,7 +2655,7 @@ vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2666,7 +2666,7 @@ vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -2678,7 +2678,7 @@ vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -2689,7 +2689,7 @@ vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -2701,7 +2701,7 @@ vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -2713,7 +2713,7 @@ vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -2725,7 +2725,7 @@ vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -2737,7 +2737,7 @@ vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -2749,7 +2749,7 @@ vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -2761,7 +2761,7 @@ vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -2773,7 +2773,7 @@ vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -2785,7 +2785,7 @@ vfloat32m4_t 
test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -2797,7 +2797,7 @@ vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -2809,7 +2809,7 @@ vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -2821,7 +2821,7 @@ vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -2833,7 +2833,7 @@ vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -2845,7 +2845,7 @@ vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -2857,7 +2857,7 @@ vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -2869,7 +2869,7 @@ vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -2881,7 +2881,7 @@ vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -2893,7 +2893,7 @@ vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -2905,7 +2905,7 @@ vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -2917,7 +2917,7 @@ vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -2929,7 +2929,7 @@ vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -2941,7 +2941,7 @@ vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -2953,7 +2953,7 @@ vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -2965,7 +2965,7 @@ vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -2977,7 +2977,7 @@ vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -2989,7 +2989,7 
@@ vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -3001,7 +3001,7 @@ vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -3013,7 +3013,7 @@ vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -3025,7 +3025,7 @@ vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -3037,7 +3037,7 @@ vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -3049,7 +3049,7 @@ vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -3061,7 +3061,7 @@ vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -3073,7 +3073,7 @@ vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -3085,7 +3085,7 @@ vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -3097,7 +3097,7 @@ vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -3109,7 +3109,7 @@ vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -3121,7 +3121,7 @@ vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -3133,7 +3133,7 @@ vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -3145,7 +3145,7 @@ vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -3157,7 +3157,7 @@ vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -3169,7 +3169,7 @@ vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -3181,7 +3181,7 @@ vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -3193,7 +3193,7 @@ vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -3205,7 +3205,7 @@ vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -3217,7 +3217,7 @@ vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -3229,7 +3229,7 @@ vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, @@ -3242,7 +3242,7 @@ vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, @@ -3255,7 +3255,7 @@ vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -3267,7 +3267,7 @@ vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -3279,7 +3279,7 @@ vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -3291,7 +3291,7 @@ vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -3303,7 +3303,7 @@ vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, @@ -3316,7 +3316,7 @@ vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -3328,7 +3328,7 @@ vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -3340,7 +3340,7 @@ vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -3352,7 +3352,7 @@ vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3364,7 +3364,7 @@ vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3376,7 +3376,7 @@ vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3388,7 +3388,7 @@ vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3400,7 +3400,7 @@ vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -3412,7 +3412,7 @@ vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, @@ -3425,7 +3425,7 @@ vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, @@ -3438,7 +3438,7 @@ vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, @@ -3451,7 +3451,7 @@ vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -3463,7 +3463,7 @@ vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -3475,7 +3475,7 @@ vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, @@ -3488,7 +3488,7 @@ vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, @@ -3501,7 +3501,7 @@ vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, @@ -3514,7 +3514,7 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c index 2f5b379..8ecd01f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c @@ -447,7 +447,7 @@ vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, 
vint8mf8_t op1, int8_t op2, size_t vl) { @@ -457,7 +457,7 @@ vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_ // // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -467,7 +467,7 @@ vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_ // // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -477,7 +477,7 @@ vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_ // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -487,7 +487,7 @@ vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1 // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -497,7 +497,7 @@ vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1 // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -507,7 +507,7 @@ vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1 // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -517,7 +517,7 @@ vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1 // // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -527,7 +527,7 @@ vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16 // // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -537,7 +537,7 @@ vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16 // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -547,7 +547,7 @@ vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_ // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -557,7 +557,7 @@ vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
 vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -567,7 +567,7 @@ vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
@@ -577,7 +577,7 @@ vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -587,7 +587,7 @@ vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -597,7 +597,7 @@ vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -607,7 +607,7 @@ vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -617,7 +617,7 @@ vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -627,7 +627,7 @@ vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
@@ -637,7 +637,7 @@ vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
@@ -647,7 +647,7 @@ vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
@@ -657,7 +657,7 @@ vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
@@ -667,7 +667,7 @@ vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -677,7 +677,7 @@ vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -687,7 +687,7 @@ vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -697,7 +697,7 @@ vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8m
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -707,7 +707,7 @@ vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -717,7 +717,7 @@ vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -727,7 +727,7 @@ vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -737,7 +737,7 @@ vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -747,7 +747,7 @@ vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -757,7 +757,7 @@ vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -767,7 +767,7 @@ vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -777,7 +777,7 @@ vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -787,7 +787,7 @@ vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -797,7 +797,7 @@ vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuin
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -817,7 +817,7 @@ vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -827,7 +827,7 @@ vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -837,7 +837,7 @@ vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -847,7 +847,7 @@ vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -857,7 +857,7 @@ vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -867,7 +867,7 @@ vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -877,7 +877,7 @@ vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64
 //
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
index 197f7fc..2763370 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
@@ -890,7 +890,7 @@ vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -901,7 +901,7 @@ vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -912,7 +912,7 @@ vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -923,7 +923,7 @@ vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -934,7 +934,7 @@ vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -945,7 +945,7 @@ vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -956,7 +956,7 @@ vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -967,7 +967,7 @@ vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -978,7 +978,7 @@ vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -989,7 +989,7 @@ vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -1000,7 +1000,7 @@ vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -1011,7 +1011,7 @@ vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -1022,7 +1022,7 @@ vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -1033,7 +1033,7 @@ vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -1044,7 +1044,7 @@ vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1056,7 +1056,7 @@ vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1067,7 +1067,7 @@ vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1079,7 +1079,7 @@ vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1090,7 +1090,7 @@ vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1101,7 +1101,7 @@ vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1112,7 +1112,7 @@ vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1123,7 +1123,7 @@ vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1134,7 +1134,7 @@ vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1145,7 +1145,7 @@ vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1156,7 +1156,7 @@ vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1167,7 +1167,7 @@ vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1178,7 +1178,7 @@ vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1190,7 +1190,7 @@ vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1201,7 +1201,7 @@ vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -1212,7 +1212,7 @@ vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -1223,7 +1223,7 @@ vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -1234,7 +1234,7 @@ vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -1245,7 +1245,7 @@ vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -1256,7 +1256,7 @@ vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -1267,7 +1267,7 @@ vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -1278,7 +1278,7 @@ vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -1289,7 +1289,7 @@ vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1300,7 +1300,7 @@ vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1311,7 +1311,7 @@ vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1322,7 +1322,7 @@ vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1333,7 +1333,7 @@ vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1344,7 +1344,7 @@ vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1355,7 +1355,7 @@ vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1366,7 +1366,7 @@ vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1377,7 +1377,7 @@ vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -1389,7 +1389,7 @@ vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -1400,7 +1400,7 @@ vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -1412,7 +1412,7 @@ vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -1423,7 +1423,7 @@ vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -1435,7 +1435,7 @@ vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -1446,7 +1446,7 @@ vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -1457,7 +1457,7 @@ vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -1468,7 +1468,7 @@ vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -1479,7 +1479,7 @@ vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -1490,7 +1490,7 @@ vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -1501,7 +1501,7 @@ vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -1512,7 +1512,7 @@ vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -1523,7 +1523,7 @@ vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -1534,7 +1534,7 @@ vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -1546,7 +1546,7 @@ vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -1558,7 +1558,7 @@ vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -1570,7 +1570,7 @@ vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -1582,7 +1582,7 @@ vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -1594,7 +1594,7 @@ vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -1605,7 +1605,7 @@ vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -1617,7 +1617,7 @@ vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -1628,7 +1628,7 @@ vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -1640,7 +1640,7 @@ vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -1651,7 +1651,7 @@ vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -1663,7 +1663,7 @@ vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -1674,7 +1674,7 @@ vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -1686,7 +1686,7 @@ vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -1698,7 +1698,7 @@ vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -1710,7 +1710,7 @@ vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -1721,7 +1721,7 @@ vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -1733,7 +1733,7 @@ vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -1744,7 +1744,7 @@ vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -1756,7 +1756,7 @@ vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -1767,7 +1767,7 @@ vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -1779,7 +1779,7 @@ vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -1790,7 +1790,7 @@ vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -1802,7 +1802,7 @@ vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1813,7 +1813,7 @@ vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1825,7 +1825,7 @@ vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1836,7 +1836,7 @@ vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1848,7 +1848,7 @@ vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1859,7 +1859,7 @@ vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -1871,7 +1871,7 @@ vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, 
 //
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
index 84fd860..4e7bb76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
@@ -9,7 +9,7 @@
 // CHECK-RV64-LABEL: @test_vse8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11:[0-9]+]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) {
@@ -20,7 +20,7 @@ void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) {
@@ -31,7 +31,7 @@ void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) {
@@ -42,7 +42,7 @@ void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) {
@@ -53,7 +53,7 @@ void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) {
@@ -64,7 +64,7 @@ void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) {
 // CHECK-RV64-LABEL:
@test_vse8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { @@ -75,7 +75,7 @@ void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { @@ -86,7 +86,7 @@ void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { @@ -97,7 +97,7 @@ void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { @@ -108,7 +108,7 @@ void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { @@ -119,7 +119,7 @@ void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { @@ -130,7 +130,7 @@ void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( 
[[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { @@ -141,7 +141,7 @@ void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { @@ -152,7 +152,7 @@ void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { @@ -163,7 +163,7 @@ void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { @@ -174,7 +174,7 @@ void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { @@ -185,7 +185,7 @@ void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { @@ -196,7 +196,7 @@ void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { @@ -207,7 +207,7 @@ void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { // CHECK-RV64-LABEL: 
@test_vse64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { @@ -218,7 +218,7 @@ void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { @@ -229,7 +229,7 @@ void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { @@ -240,7 +240,7 @@ void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { @@ -251,7 +251,7 @@ void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { @@ -262,7 +262,7 @@ void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { @@ -273,7 +273,7 @@ void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( 
[[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { @@ -284,7 +284,7 @@ void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { @@ -295,7 +295,7 @@ void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { @@ -306,7 +306,7 @@ void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { @@ -317,7 +317,7 @@ void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { @@ -328,7 +328,7 @@ void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { @@ -339,7 +339,7 @@ void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { @@ -350,7 +350,7 @@ void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_u16m1( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { @@ -361,7 +361,7 @@ void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { @@ -372,7 +372,7 @@ void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { @@ -383,7 +383,7 @@ void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { @@ -394,7 +394,7 @@ void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { @@ -405,7 +405,7 @@ void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { @@ -416,7 +416,7 @@ void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( 
[[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { @@ -427,7 +427,7 @@ void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { @@ -438,7 +438,7 @@ void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { @@ -449,7 +449,7 @@ void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { @@ -460,7 +460,7 @@ void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { @@ -471,7 +471,7 @@ void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { @@ -482,7 +482,7 @@ void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { @@ -493,7 +493,7 @@ void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { // 
CHECK-RV64-LABEL: @test_vse32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) { @@ -504,7 +504,7 @@ void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) { @@ -515,7 +515,7 @@ void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) { @@ -526,7 +526,7 @@ void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) { @@ -537,7 +537,7 @@ void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) { @@ -548,7 +548,7 @@ void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) { @@ -559,7 +559,7 @@ void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { @@ -570,7 +570,7 @@ void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) { @@ -581,7 +581,7 @@ void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { @@ -592,7 +592,7 @@ void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { // CHECK-RV64-LABEL: @test_vse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { @@ -603,7 +603,7 @@ void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t // CHECK-RV64-LABEL: @test_vse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { @@ -614,7 +614,7 @@ void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t // CHECK-RV64-LABEL: @test_vse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { @@ -625,7 +625,7 @@ void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t // CHECK-RV64-LABEL: @test_vse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( 
[[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { @@ -636,7 +636,7 @@ void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) // CHECK-RV64-LABEL: @test_vse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { @@ -647,7 +647,7 @@ void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) // CHECK-RV64-LABEL: @test_vse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { @@ -658,7 +658,7 @@ void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) // CHECK-RV64-LABEL: @test_vse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { @@ -669,7 +669,7 @@ void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) // CHECK-RV64-LABEL: @test_vse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { @@ -680,7 +680,7 @@ void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, siz // CHECK-RV64-LABEL: @test_vse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { @@ -691,7 +691,7 @@ void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, siz // CHECK-RV64-LABEL: @test_vse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { @@ -702,7 +702,7 @@ void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_ // CHECK-RV64-LABEL: @test_vse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { @@ -713,7 +713,7 @@ void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t // CHECK-RV64-LABEL: @test_vse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { @@ -724,7 +724,7 @@ void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t // CHECK-RV64-LABEL: @test_vse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { @@ -735,7 +735,7 @@ void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t // CHECK-RV64-LABEL: @test_vse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { @@ -746,7 +746,7 @@ void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, siz // CHECK-RV64-LABEL: @test_vse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { @@ -757,7 +757,7 @@ void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_ // CHECK-RV64-LABEL: @test_vse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { @@ -768,7 +768,7 @@ void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_ // CHECK-RV64-LABEL: @test_vse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { @@ -779,7 +779,7 @@ void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t // CHECK-RV64-LABEL: @test_vse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { @@ -790,7 +790,7 @@ void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t // CHECK-RV64-LABEL: @test_vse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { @@ -801,7 +801,7 @@ void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_ // CHECK-RV64-LABEL: @test_vse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { @@ -812,7 +812,7 @@ void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_ // CHECK-RV64-LABEL: @test_vse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { @@ -823,7 +823,7 @@ void test_vse64_v_i64m4_m(vbool16_t mask, 
int64_t *base, vint64m4_t value, size_ // CHECK-RV64-LABEL: @test_vse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { @@ -834,7 +834,7 @@ void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t // CHECK-RV64-LABEL: @test_vse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { @@ -845,7 +845,7 @@ void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_ // CHECK-RV64-LABEL: @test_vse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { @@ -856,7 +856,7 @@ void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_ // CHECK-RV64-LABEL: @test_vse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { @@ -867,7 +867,7 @@ void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_ // CHECK-RV64-LABEL: @test_vse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { @@ -878,7 +878,7 @@ void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t v // CHECK-RV64-LABEL: @test_vse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m2_m(vbool4_t mask, 
uint8_t *base, vuint8m2_t value, size_t vl) { @@ -889,7 +889,7 @@ void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t v // CHECK-RV64-LABEL: @test_vse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { @@ -900,7 +900,7 @@ void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t v // CHECK-RV64-LABEL: @test_vse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { @@ -911,7 +911,7 @@ void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t v // CHECK-RV64-LABEL: @test_vse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { @@ -922,7 +922,7 @@ void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, s // CHECK-RV64-LABEL: @test_vse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { @@ -933,7 +933,7 @@ void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, s // CHECK-RV64-LABEL: @test_vse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { @@ -944,7 +944,7 @@ void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, siz // CHECK-RV64-LABEL: @test_vse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( 
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) {
@@ -955,7 +955,7 @@ void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size
 // CHECK-RV64-LABEL: @test_vse16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) {
@@ -966,7 +966,7 @@ void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size
 // CHECK-RV64-LABEL: @test_vse16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) {
@@ -977,7 +977,7 @@ void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size
 // CHECK-RV64-LABEL: @test_vse32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) {
@@ -988,7 +988,7 @@ void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, s
 // CHECK-RV64-LABEL: @test_vse32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) {
@@ -999,7 +999,7 @@ void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, siz
 // CHECK-RV64-LABEL: @test_vse32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) {
@@ -1010,7 +1010,7 @@ void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, siz
 // CHECK-RV64-LABEL: @test_vse32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) {
@@ -1021,7 +1021,7 @@ void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size
 // CHECK-RV64-LABEL: @test_vse32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
@@ -1032,7 +1032,7 @@ void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size
 // CHECK-RV64-LABEL: @test_vse64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
@@ -1043,7 +1043,7 @@ void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, siz
 // CHECK-RV64-LABEL: @test_vse64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
@@ -1054,7 +1054,7 @@ void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, siz
 // CHECK-RV64-LABEL: @test_vse64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
@@ -1065,7 +1065,7 @@ void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, siz
 // CHECK-RV64-LABEL: @test_vse64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
@@ -1076,7 +1076,7 @@ void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size
 // CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
@@ -1087,7 +1087,7 @@ void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, siz
 // CHECK-RV64-LABEL: @test_vse32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
@@ -1098,7 +1098,7 @@ void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_
 // CHECK-RV64-LABEL: @test_vse32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
@@ -1109,7 +1109,7 @@ void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_
 // CHECK-RV64-LABEL: @test_vse32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) {
@@ -1120,7 +1120,7 @@ void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t
 // CHECK-RV64-LABEL: @test_vse32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) {
@@ -1131,7 +1131,7 @@ void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t
 // CHECK-RV64-LABEL: @test_vse64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) {
@@ -1142,7 +1142,7 @@ void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size
 // CHECK-RV64-LABEL: @test_vse64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) {
@@ -1153,7 +1153,7 @@ void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size
 // CHECK-RV64-LABEL: @test_vse64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) {
@@ -1164,7 +1164,7 @@ void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size
 // CHECK-RV64-LABEL: @test_vse64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
@@ -1175,7 +1175,7 @@ void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_
 // CHECK-RV64-LABEL: @test_vse1_v_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv64i1.i64(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv64i1.i64(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
@@ -1186,7 +1186,7 @@ void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse1_v_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv32i1.i64(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv32i1.i64(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
@@ -1197,7 +1197,7 @@ void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse1_v_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv16i1.i64(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv16i1.i64(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
@@ -1208,7 +1208,7 @@ void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse1_v_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv8i1.i64(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv8i1.i64(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
@@ -1219,7 +1219,7 @@ void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse1_v_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv4i1.i64(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv4i1.i64(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
@@ -1230,7 +1230,7 @@ void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse1_v_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv2i1.i64(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv2i1.i64(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
@@ -1241,7 +1241,7 @@ void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
 // CHECK-RV64-LABEL: @test_vse1_v_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv1i1.i64(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv1i1.i64(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
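Illustrative only, not part of the patch: a minimal C sketch of the overloaded masked-store call shape these vse.c tests pin down, assuming an RV64 toolchain with the V extension and the overloaded spelling vse64 used by this intrinsic set. The C operand order is (mask, base, value, vl), which is the order the builtins now share.

#include <riscv_vector.h>

void store_masked(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) {
  // Masked store: only lanes with an active mask bit are written.
  // Lowers to @llvm.riscv.vse.mask.nxv8i64.i64, as the CHECK lines above verify.
  vse64(mask, base, value, vl);
}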
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
index b5ca5da..64f8ab7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
@@ -287,7 +287,7 @@ vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -298,7 +298,7 @@ vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -309,7 +309,7 @@ vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -320,7 +320,7 @@ vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -331,7 +331,7 @@ vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -342,7 +342,7 @@ vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -353,7 +353,7 @@ vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -364,7 +364,7 @@ vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -375,7 +375,7 @@ vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -386,7 +386,7 @@ vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -397,7 +397,7 @@ vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -408,7 +408,7 @@ vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -419,7 +419,7 @@ vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -430,7 +430,7 @@ vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -441,7 +441,7 @@ vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -452,7 +452,7 @@ vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -463,7 +463,7 @@ vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -474,7 +474,7 @@ vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -485,7 +485,7 @@ vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -496,7 +496,7 @@ vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -507,7 +507,7 @@ vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -518,7 +518,7 @@ vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -529,7 +529,7 @@ vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -540,7 +540,7 @@ vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -551,7 +551,7 @@ vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -562,7 +562,7 @@ vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -573,7 +573,7 @@ vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -584,7 +584,7 @@ vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
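Illustrative only, not part of the patch: a sketch of the masked sign-extension form exercised by the vsext.c tests above, assuming the overloaded spelling vsext_vf2 from this intrinsic set. After the reorder, the builtin takes (mask, maskedoff, op1, vl), the same order as the C API; inactive lanes take their value from maskedoff.

#include <riscv_vector.h>

vint16mf4_t widen_masked(vbool64_t mask, vint16mf4_t maskedoff,
                         vint8mf8_t op1, size_t vl) {
  // Sign-extend each active i8 lane to i16; lowers to
  // @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64, as checked above.
  return vsext_vf2(mask, maskedoff, op1, vl);
}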
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
index d424e2f..1d9d3c5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
@@ -468,7 +468,7 @@ vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -480,7 +480,7 @@ vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -492,7 +492,7 @@ vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -504,7 +504,7 @@ vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -515,7 +515,7 @@ vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -526,7 +526,7 @@ vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -537,7 +537,7 @@ vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -548,7 +548,7 @@ vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -560,7 +560,7 @@ vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -572,7 +572,7 @@ vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -584,7 +584,7 @@ vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -596,7 +596,7 @@ vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -608,7 +608,7 @@ vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -620,7 +620,7 @@ vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -632,7 +632,7 @@ vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -644,7 +644,7 @@ vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -656,7 +656,7 @@ vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -668,7 +668,7 @@ vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -680,7 +680,7 @@ vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -692,7 +692,7 @@ vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -704,7 +704,7 @@ vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -716,7 +716,7 @@ vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -728,7 +728,7 @@ vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -740,7 +740,7 @@ vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -752,7 +752,7 @@ vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -764,7 +764,7 @@ vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -776,7 +776,7 @@ vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -788,7 +788,7 @@ vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -800,7 +800,7 @@ vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -812,7 +812,7 @@ vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask,
@@ -825,7 +825,7 @@ vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask,
@@ -838,7 +838,7 @@ vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -850,7 +850,7 @@ vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -862,7 +862,7 @@ vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -874,7 +874,7 @@ vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -886,7 +886,7 @@ vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask,
@@ -899,7 +899,7 @@ vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -911,7 +911,7 @@ vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -923,7 +923,7 @@ vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -935,7 +935,7 @@ vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -947,7 +947,7 @@ vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -959,7 +959,7 @@ vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -971,7 +971,7 @@ vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
[[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -983,7 +983,7 @@ vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c index 478b061..051d976 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c @@ -465,7 +465,7 @@ vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -476,7 +476,7 @@ vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -487,7 +487,7 @@ vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -498,7 +498,7 @@ vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -509,7 +509,7 @@ vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -520,7 +520,7 @@ vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -531,7 +531,7 @@ vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -542,7 +542,7 @@ vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -554,7 +554,7 @@ vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -566,7 +566,7 @@ vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -577,7 +577,7 @@ vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -588,7 +588,7 @@ vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -599,7 +599,7 @@ vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -610,7 +610,7 @@ vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -622,7 +622,7 @@ vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -633,7 +633,7 @@ vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -644,7 +644,7 @@ vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -655,7 +655,7 @@ vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -666,7 +666,7 @@ vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -677,7 +677,7 @@ vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -688,7 +688,7 @@ vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -699,7 +699,7 @@ vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -710,7 +710,7 @@ vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -722,7 +722,7 @@ vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -734,7 +734,7 @@ vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -746,7 +746,7 @@ vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -757,7 +757,7 @@ vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -768,7 +768,7 @@ vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -779,7 +779,7 @@ vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -790,7 +790,7 @@ vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -802,7 +802,7 @@ vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -814,7 +814,7 @@ vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -826,7 +826,7 @@ vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -838,7 +838,7 @@ vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -850,7 +850,7 @@ vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -862,7 +862,7 @@ vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -874,7 +874,7 @@ vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -886,7 +886,7 @@ vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -898,7 +898,7 @@ vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -910,7 +910,7 @@ vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -922,7 +922,7 @@ vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -934,7 +934,7 @@ vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -946,7 +946,7 @@ vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -958,7 +958,7 @@ vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c index a3793bf..b5e0603 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c @@ -591,7 +591,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, @@ -603,7 +603,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, @@ -615,7 +615,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, @@ -627,7 +627,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, @@ -638,7 +638,7 @@ vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, @@ -649,7 +649,7 @@ vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, @@ -660,7 +660,7 @@ vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, @@ -671,7 +671,7 @@ vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, @@ -683,7 +683,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, @@ -695,7 +695,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -707,7 +707,7 @@ vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, @@ -719,7 +719,7 @@ vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, @@ -731,7 +731,7 @@ vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, @@ -743,7 +743,7 @@ vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, @@ -755,7 +755,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -767,7 +767,7 @@ vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, @@ -779,7 +779,7 @@ vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, @@ -791,7 +791,7 @@ vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, @@ -803,7 +803,7 @@ vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -815,7 +815,7 @@ vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, @@ -827,7 +827,7 @@ vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, @@ -839,7 +839,7 @@ vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, @@ -851,7 +851,7 @@ vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, @@ -863,7 +863,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, @@ -875,7 +875,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, @@ -887,7 +887,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -898,7 +898,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, @@ -909,7 +909,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, @@ -920,7 +920,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, // 
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, @@ -931,7 +931,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, @@ -943,7 +943,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, @@ -955,7 +955,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -967,7 +967,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, @@ -979,7 +979,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, @@ 
-991,7 +991,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, @@ -1003,7 +1003,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, @@ -1015,7 +1015,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -1027,7 +1027,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, @@ -1039,7 +1039,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, @@ -1051,7 +1051,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
@@ -1063,7 +1063,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -1075,7 +1075,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
@@ -1087,7 +1087,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
@@ -1099,7 +1099,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
@@ -1111,7 +1111,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
@@ -1123,7 +1123,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -1135,7 +1135,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
@@ -1147,7 +1147,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
@@ -1159,7 +1159,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
@@ -1171,7 +1171,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -1183,7 +1183,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
@@ -1195,7 +1195,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
@@ -1207,7 +1207,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
index 49d954d..0dcc739 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
@@ -591,7 +591,7 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
 //
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
@@ -602,7 +602,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
@@ -613,7 +613,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
@@ -624,7 +624,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 //
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, @@ -635,7 +635,7 @@ vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, @@ -646,7 +646,7 @@ vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, @@ -657,7 +657,7 @@ vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, @@ -668,7 +668,7 @@ vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, // // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, @@ -680,7 +680,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, @@ -692,7 +692,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -703,7 +703,7 @@ vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, @@ -714,7 +714,7 @@ vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, @@ -725,7 +725,7 @@ vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, @@ -736,7 +736,7 @@ vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, @@ -748,7 +748,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -759,7 +759,7 @@ vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, @@ -770,7 +770,7 @@ vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, @@ -781,7 +781,7 @@ vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, @@ -792,7 +792,7 @@ vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -803,7 +803,7 @@ vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, @@ -814,7 +814,7 @@ vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, @@ -825,7 +825,7 @@ vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, @@ -836,7 +836,7 @@ vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, @@ -848,7 +848,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, @@ -860,7 +860,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, @@ -872,7 +872,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -883,7 +883,7 @@ vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, @@ -894,7 +894,7 @@ vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, @@ -905,7 +905,7 @@ vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, @@ -916,7 +916,7 @@ vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, @@ -928,7 +928,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, @@ -940,7 +940,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -952,7 +952,7 @@ vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, @@ -964,7 +964,7 @@ vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, @@ -976,7 +976,7 @@ vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, @@ -988,7 +988,7 @@ vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, @@ -1000,7 +1000,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -1012,7 +1012,7 @@ vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, @@ -1024,7 +1024,7 @@ vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, @@ -1036,7 +1036,7 @@ vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, @@ -1048,7 +1048,7 @@ vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1060,7 +1060,7 @@ vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, @@ -1072,7 +1072,7 @@ vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, @@ -1084,7 +1084,7 @@ vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, @@ -1096,7 +1096,7 @@ vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, @@ -1108,7 +1108,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -1120,7 +1120,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, @@ -1132,7 +1132,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, @@ -1144,7 +1144,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, @@ -1156,7 +1156,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -1168,7 +1168,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, @@ -1180,7 +1180,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m( // 
CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
@@ -1192,7 +1192,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
 //
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
index f37c2de..532ac398 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
@@ -887,7 +887,7 @@ vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -897,7 +897,7 @@ vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
@@ -907,7 +907,7 @@ vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -917,7 +917,7 @@ vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { @@ -927,7 +927,7 @@ vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -937,7 +937,7 @@ vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { @@ -947,7 +947,7 @@ vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -957,7 +957,7 @@ vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { @@ -967,7 +967,7 @@ vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -977,7 +977,7 @@ vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { @@ -987,7 +987,7 @@ vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -997,7 +997,7 @@ vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { @@ -1007,7 +1007,7 @@ vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -1017,7 +1017,7 @@ vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { @@ -1027,7 +1027,7 @@ vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: 
@test_vsll_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -1037,7 +1037,7 @@ vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { @@ -1047,7 +1047,7 @@ vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -1057,7 +1057,7 @@ vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { @@ -1067,7 +1067,7 @@ vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -1077,7 +1077,7 @@ vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { @@ -1087,7 +1087,7 @@ vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -1097,7 +1097,7 @@ vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { @@ -1107,7 +1107,7 @@ vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -1117,7 +1117,7 @@ vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { @@ -1127,7 +1127,7 @@ vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, 
vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -1137,7 +1137,7 @@ vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { @@ -1147,7 +1147,7 @@ vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -1157,7 +1157,7 @@ vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { @@ -1167,7 +1167,7 @@ vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -1177,7 +1177,7 @@ vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { @@ -1187,7 +1187,7 @@ vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( // CHECK-RV64-NEXT: 
entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -1197,7 +1197,7 @@ vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
@@ -1207,7 +1207,7 @@ vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -1217,7 +1217,7 @@ vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
@@ -1227,7 +1227,7 @@ vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -1237,7 +1237,7 @@ vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
@@ -1247,7 +1247,7 @@ vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -1257,7 +1257,7 @@ vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
@@ -1267,7 +1267,7 @@ vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -1277,7 +1277,7 @@ vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
@@ -1287,7 +1287,7 @@ vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -1297,7 +1297,7 @@ vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
@@ -1307,7 +1307,7 @@ vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -1317,7 +1317,7 @@ vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
@@ -1327,7 +1327,7 @@ vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -1337,7 +1337,7 @@ vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -1357,7 +1357,7 @@ vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -1367,7 +1367,7 @@ vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -1377,7 +1377,7 @@ vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -1387,7 +1387,7 @@ vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -1397,7 +1397,7 @@ vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
@@ -1407,7 +1407,7 @@ vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -1417,7 +1417,7 @@ vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
@@ -1427,7 +1427,7 @@ vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
@@ -1447,7 +1447,7 @@ vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -1457,7 +1457,7 @@ vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
@@ -1467,7 +1467,7 @@ vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -1477,7 +1477,7 @@ vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -1487,7 +1487,7 @@ vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -1497,7 +1497,7 @@ vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -1507,7 +1507,7 @@ vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -1517,7 +1517,7 @@ vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -1537,7 +1537,7 @@ vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
@@ -1547,7 +1547,7 @@ vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -1557,7 +1557,7 @@ vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
@@ -1567,7 +1567,7 @@ vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -1577,7 +1577,7 @@ vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
@@ -1587,7 +1587,7 @@ vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1597,7 +1597,7 @@ vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -1607,7 +1607,7 @@ vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -1617,7 +1617,7 @@ vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
@@ -1627,7 +1627,7 @@ vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -1637,7 +1637,7 @@ vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
@@ -1647,7 +1647,7 @@ vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -1657,7 +1657,7 @@ vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
@@ -1667,7 +1667,7 @@ vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -1677,7 +1677,7 @@ vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
@@ -1687,7 +1687,7 @@ vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -1697,7 +1697,7 @@ vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
@@ -1707,7 +1707,7 @@ vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -1717,7 +1717,7 @@ vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
@@ -1727,7 +1727,7 @@ vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -1737,7 +1737,7 @@ vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
@@ -1747,7 +1747,7 @@ vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
 //
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
index 347a44a..e2ae796 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
@@ -447,7 +447,7 @@ vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -458,7 +458,7 @@ vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -469,7 +469,7 @@ vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -480,7 +480,7 @@ vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -491,7 +491,7 @@ vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -502,7 +502,7 @@ vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -513,7 +513,7 @@ vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -524,7 +524,7 @@ vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -535,7 +535,7 @@ vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -546,7 +546,7 @@ vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -557,7 +557,7 @@ vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -568,7 +568,7 @@ vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -579,7 +579,7 @@ vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -590,7 +590,7 @@ vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
@@ -601,7 +601,7 @@ vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -613,7 +613,7 @@ vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -624,7 +624,7 @@ vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -636,7 +636,7 @@ vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -647,7 +647,7 @@ vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -658,7 +658,7 @@ vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -669,7 +669,7 @@ vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -680,7 +680,7 @@ vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -691,7 +691,7 @@ vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -702,7 +702,7 @@ vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -713,7 +713,7 @@ vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -724,7 +724,7 @@ vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -735,7 +735,7 @@ vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -747,7 +747,7 @@ vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -758,7 +758,7 @@ vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -769,7 +769,7 @@ vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -780,7 +780,7 @@ vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -791,7 +791,7 @@ vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -802,7 +802,7 @@ vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -813,7 +813,7 @@ vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -824,7 +824,7 @@ vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -835,7 +835,7 @@ vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -846,7 +846,7 @@ vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -857,7 +857,7 @@ vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -868,7 +868,7 @@ vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -879,7 +879,7 @@ vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -890,7 +890,7 @@ vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -901,7 +901,7 @@ vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -912,7 +912,7 @@ vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -923,7 +923,7 @@ vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c
index 377d3e7..442e107 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c
@@ -9,7 +9,7 @@
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
@@ -21,7 +21,7 @@ void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
@@ -33,7 +33,7 @@ void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
@@ -45,7 +45,7 @@ void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
@@ -57,7 +57,7 @@ void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
@@ -69,7 +69,7 @@ void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
@@ -81,7 +81,7 @@ void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
@@ -93,7 +93,7 @@ void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
@@ -105,7 +105,7 @@ void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
@@ -117,7 +117,7 @@ void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
@@ -129,7 +129,7 @@ void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
@@ -141,7 +141,7 @@ void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
@@ -153,7 +153,7 @@ void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
@@ -165,7 +165,7 @@ void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
@@ -177,7 +177,7 @@ void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex,
vint8mf4_t value, @@ -189,7 +189,7 @@ void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, @@ -201,7 +201,7 @@ void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, @@ -213,7 +213,7 @@ void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, @@ -225,7 +225,7 @@ void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, @@ -237,7 +237,7 @@ void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, @@ -249,7 +249,7 @@ void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, @@ -261,7 +261,7 @@ void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, @@ -273,7 +273,7 @@ void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, @@ -285,7 +285,7 @@ void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, @@ -297,7 +297,7 @@ void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, @@ -309,7 +309,7 @@ void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, @@ -321,7 +321,7 @@ void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], 
* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, @@ -333,7 +333,7 @@ void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, @@ -345,7 +345,7 @@ void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, @@ -357,7 +357,7 @@ void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, @@ -369,7 +369,7 @@ void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, @@ -381,7 +381,7 @@ void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, @@ -393,7 +393,7 @@ void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, @@ -405,7 +405,7 @@ void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, @@ -417,7 +417,7 @@ void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, @@ -429,7 +429,7 @@ void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, @@ -441,7 +441,7 @@ void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, @@ -453,7 +453,7 @@ void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, @@ -465,7 +465,7 @@ void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, // 
CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, @@ -477,7 +477,7 @@ void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, @@ -489,7 +489,7 @@ void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, @@ -501,7 +501,7 @@ void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, @@ -513,7 +513,7 @@ void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, @@ -525,7 +525,7 @@ void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, 
@@ -537,7 +537,7 @@ void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
@@ -549,7 +549,7 @@ void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
@@ -561,7 +561,7 @@ void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
@@ -573,7 +573,7 @@ void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
@@ -585,7 +585,7 @@ void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
@@ -597,7 +597,7 @@ void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
@@ -609,7 +609,7 @@ void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
@@ -621,7 +621,7 @@ void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
@@ -633,7 +633,7 @@ void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
@@ -645,7 +645,7 @@ void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
@@ -657,7 +657,7 @@ void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
@@ -669,7 +669,7 @@ void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
@@ -681,7 +681,7 @@ void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
@@ -693,7 +693,7 @@ void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
@@ -705,7 +705,7 @@ void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
@@ -717,7 +717,7 @@ void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
@@ -729,7 +729,7 @@ void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
@@ -741,7 +741,7 @@ void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
@@ -753,7 +753,7 @@ void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
@@ -765,7 +765,7 @@ void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
@@ -777,7 +777,7 @@ void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
@@ -789,7 +789,7 @@ void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
@@ -801,7 +801,7 @@ void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
@@ -813,7 +813,7 @@ void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
@@ -825,7 +825,7 @@ void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
@@ -837,7 +837,7 @@ void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
@@ -849,7 +849,7 @@ void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
@@ -861,7 +861,7 @@ void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
@@ -873,7 +873,7 @@ void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
@@ -885,7 +885,7 @@ void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
@@ -897,7 +897,7 @@ void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
@@ -909,7 +909,7 @@ void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
@@ -921,7 +921,7 @@ void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
@@ -933,7 +933,7 @@ void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
@@ -945,7 +945,7 @@ void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
@@ -957,7 +957,7 @@ void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
@@ -969,7 +969,7 @@ void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
@@ -981,7 +981,7 @@ void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
@@ -993,7 +993,7 @@ void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
@@ -1005,7 +1005,7 @@ void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
@@ -1017,7 +1017,7 @@ void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
@@ -1029,7 +1029,7 @@ void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
@@ -1041,7 +1041,7 @@ void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
@@ -1053,7 +1053,7 @@ void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
@@ -1065,7 +1065,7 @@ void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
@@ -1077,7 +1077,7 @@ void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
@@ -1089,7 +1089,7 @@ void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
@@ -1101,7 +1101,7 @@ void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
@@ -1113,7 +1113,7 @@ void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
@@ -1125,7 +1125,7 @@ void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
@@ -1137,7 +1137,7 @@ void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
@@ -1149,7 +1149,7 @@ void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
@@ -1161,7 +1161,7 @@ void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
@@ -1173,7 +1173,7 @@ void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
@@ -1185,7 +1185,7 @@ void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
@@ -1197,7 +1197,7 @@ void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
@@ -1209,7 +1209,7 @@ void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
@@ -1221,7 +1221,7 @@ void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
@@ -1233,7 +1233,7 @@ void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
@@ -1245,7 +1245,7 @@ void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
@@ -1257,7 +1257,7 @@ void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
@@ -1269,7 +1269,7 @@ void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
@@ -1281,7 +1281,7 @@ void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
@@ -1293,7 +1293,7 @@ void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
@@ -1305,7 +1305,7 @@ void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
@@ -1317,7 +1317,7 @@ void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
@@ -1329,7 +1329,7 @@ void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
@@ -1341,7 +1341,7 @@ void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
@@ -1353,7 +1353,7 @@ void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
@@ -1365,7 +1365,7 @@ void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, @@ -1377,7 +1377,7 @@ void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, @@ -1389,7 +1389,7 @@ void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, @@ -1401,7 +1401,7 @@ void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, @@ -1413,7 +1413,7 @@ void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, @@ -1425,7 +1425,7 @@ void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, @@ -1437,7 +1437,7 @@ void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, @@ -1449,7 +1449,7 @@ void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, @@ -1461,7 +1461,7 @@ void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, @@ -1473,7 +1473,7 @@ void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, @@ -1485,7 +1485,7 @@ void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, @@ -1497,7 +1497,7 @@ void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, @@ -1509,7 +1509,7 @@ void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, @@ -1521,7 +1521,7 @@ void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, @@ -1533,7 +1533,7 @@ void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, @@ -1545,7 +1545,7 @@ void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, @@ -1557,7 +1557,7 @@ void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, @@ -1569,7 +1569,7 @@ void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, @@ -1581,7 +1581,7 @@ void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, @@ -1593,7 +1593,7 @@ void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, @@ -1605,7 +1605,7 @@ void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, @@ -1617,7 +1617,7 @@ void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, @@ -1629,7 +1629,7 @@ void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, @@ -1641,7 +1641,7 @@ void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, @@ -1653,7 +1653,7 @@ void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, @@ -1665,7 +1665,7 @@ void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, @@ -1677,7 +1677,7 @@ void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, @@ -1689,7 +1689,7 @@ void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, @@ -1701,7 +1701,7 @@ void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, @@ -1713,7 +1713,7 @@ void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, @@ -1725,7 +1725,7 @@ void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, @@ -1737,7 +1737,7 @@ void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, @@ -1749,7 +1749,7 @@ void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, @@ -1761,7 +1761,7 @@ void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, @@ -1773,7 +1773,7 @@ void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, @@ -1785,7 +1785,7 @@ void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, @@ -1797,7 +1797,7 @@ void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, @@ -1809,7 +1809,7 @@ void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, @@ -1821,7 +1821,7 @@ void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, @@ -1833,7 +1833,7 @@ void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, @@ -1845,7 +1845,7 @@ void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, @@ -1857,7 +1857,7 @@ void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, @@ -1869,7 +1869,7 @@ void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, @@ -1881,7 +1881,7 @@ void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, @@ -1893,7 +1893,7 @@ void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, @@ -1905,7 +1905,7 @@ void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, @@ -1917,7 +1917,7 @@ void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, @@ -1929,7 +1929,7 @@ void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, @@ -1941,7 +1941,7 @@ void 
test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex, @@ -1953,7 +1953,7 @@ void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, @@ -1965,7 +1965,7 @@ void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, @@ -1977,7 +1977,7 @@ void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, @@ -1989,7 +1989,7 @@ void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, @@ -2001,7 +2001,7 @@ void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex, @@ -2013,7 +2013,7 @@ void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, @@ -2025,7 +2025,7 @@ void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, @@ -2037,7 +2037,7 @@ void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, @@ -2049,7 +2049,7 @@ void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, @@ -2061,7 +2061,7 @@ void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex, @@ -2073,7 +2073,7 @@ void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, @@ -2085,7 +2085,7 @@ void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, @@ -2097,7 +2097,7 @@ void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, @@ -2109,7 +2109,7 @@ void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, @@ -2121,7 +2121,7 @@ void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, @@ -2133,7 +2133,7 @@ void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, @@ -2145,7 +2145,7 @@ void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, @@ -2157,7 +2157,7 @@ void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex, @@ -2169,7 +2169,7 @@ void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex, @@ -2181,7 +2181,7 @@ void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, @@ -2193,7 +2193,7 @@ void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, @@ -2205,7 +2205,7 @@ void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex, @@ -2217,7 +2217,7 @@ void test_vsoxei32_v_f64m1(double *base, 
vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, @@ -2229,7 +2229,7 @@ void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, @@ -2241,7 +2241,7 @@ void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, @@ -2253,7 +2253,7 @@ void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, @@ -2265,7 +2265,7 @@ void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, @@ -2277,7 +2277,7 @@ void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, @@ -2289,7 +2289,7 @@ void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, @@ -2301,7 +2301,7 @@ void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, @@ -2313,7 +2313,7 @@ void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, @@ -2325,7 +2325,7 @@ void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, @@ -2337,7 +2337,7 @@ void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, @@ -2349,7 +2349,7 @@ void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, @@ -2361,7 +2361,7 @@ void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, @@ -2373,7 +2373,7 @@ void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, @@ -2385,7 +2385,7 @@ void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, @@ -2397,7 +2397,7 @@ void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, @@ -2409,7 +2409,7 @@ void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64( 
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
@@ -2421,7 +2421,7 @@ void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
@@ -2433,7 +2433,7 @@ void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
@@ -2445,7 +2445,7 @@ void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
@@ -2457,7 +2457,7 @@ void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
@@ -2469,7 +2469,7 @@ void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
@@ -2481,7 +2481,7 @@ void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
@@ -2493,7 +2493,7 @@ void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
@@ -2505,7 +2505,7 @@ void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
@@ -2517,7 +2517,7 @@ void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
@@ -2529,7 +2529,7 @@ void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
@@ -2541,7 +2541,7 @@ void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]],
 <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
@@ -2553,7 +2553,7 @@ void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
@@ -2565,7 +2565,7 @@ void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
@@ -2577,7 +2577,7 @@ void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
@@ -2589,7 +2589,7 @@ void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
@@ -2601,7 +2601,7 @@ void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void
 test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
@@ -2613,7 +2613,7 @@ void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
@@ -2625,7 +2625,7 @@ void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
@@ -2637,7 +2637,7 @@ void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
@@ -2650,7 +2650,7 @@ void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
@@ -2663,7 +2663,7 @@ void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
@@ -2675,7 +2675,7 @@ void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-//
 CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
@@ -2687,7 +2687,7 @@ void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
@@ -2699,7 +2699,7 @@ void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
@@ -2711,7 +2711,7 @@ void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
@@ -2724,7 +2724,7 @@ void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
@@ -2736,7 +2736,7 @@ void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]],
 <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
@@ -2748,7 +2748,7 @@ void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
@@ -2760,7 +2760,7 @@ void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
@@ -2772,7 +2772,7 @@ void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
@@ -2784,7 +2784,7 @@ void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
@@ -2796,7 +2796,7 @@ void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
@@ -2808,7 +2808,7 @@ void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t
 bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex,
@@ -2820,7 +2820,7 @@ void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
@@ -2832,7 +2832,7 @@ void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
@@ -2844,7 +2844,7 @@ void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex,
@@ -2856,7 +2856,7 @@ void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
@@ -2868,7 +2868,7 @@ void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]],
 <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex,
@@ -2880,7 +2880,7 @@ void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
@@ -2893,7 +2893,7 @@ void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
@@ -2905,7 +2905,7 @@ void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
@@ -2917,7 +2917,7 @@ void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
@@ -2929,7 +2929,7 @@ void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base,
 vuint16m4_t bindex,
@@ -2941,7 +2941,7 @@ void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
@@ -2954,7 +2954,7 @@ void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
@@ -2966,7 +2966,7 @@ void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
@@ -2978,7 +2978,7 @@ void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
@@ -2990,7 +2990,7 @@ void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
@@ -3002,7 +3002,7 @@ void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-//
 CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
@@ -3014,7 +3014,7 @@ void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
@@ -3026,7 +3026,7 @@ void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
@@ -3038,7 +3038,7 @@ void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
@@ -3050,7 +3050,7 @@ void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
@@ -3062,7 +3062,7 @@ void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>*
 [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
@@ -3074,7 +3074,7 @@ void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
@@ -3086,7 +3086,7 @@ void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
@@ -3098,7 +3098,7 @@ void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
@@ -3110,7 +3110,7 @@ void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
@@ -3122,7 +3122,7 @@ void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
@@ -3134,7 +3134,7 @@ void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base,
 vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
@@ -3146,7 +3146,7 @@ void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
@@ -3158,7 +3158,7 @@ void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
@@ -3170,7 +3170,7 @@ void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
@@ -3182,7 +3182,7 @@ void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
@@ -3194,7 +3194,7 @@ void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64(
<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
@@ -3206,7 +3206,7 @@ void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
@@ -3218,7 +3218,7 @@ void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
@@ -3230,7 +3230,7 @@ void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
@@ -3242,7 +3242,7 @@ void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
@@ -3254,7 +3254,7 @@ void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
@@ -3266,7 +3266,7 @@ void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
@@ -3278,7 +3278,7 @@ void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
@@ -3290,7 +3290,7 @@ void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
@@ -3302,7 +3302,7 @@ void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
@@ -3314,7 +3314,7 @@ void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
@@ -3326,7 +3326,7 @@ void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast
 i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
@@ -3338,7 +3338,7 @@ void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
@@ -3350,7 +3350,7 @@ void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
@@ -3362,7 +3362,7 @@ void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
@@ -3374,7 +3374,7 @@ void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
@@ -3386,7 +3386,7 @@ void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(
<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
@@ -3398,7 +3398,7 @@ void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
@@ -3410,7 +3410,7 @@ void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
@@ -3422,7 +3422,7 @@ void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
@@ -3434,7 +3434,7 @@ void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
@@ -3446,7 +3446,7 @@ void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base,
 vuint32m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
@@ -3470,7 +3470,7 @@ void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
@@ -3482,7 +3482,7 @@ void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
@@ -3494,7 +3494,7 @@ void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
@@ -3506,7 +3506,7 @@ void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
@@ -3518,7 +3518,7 @@ void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, @@ -3530,7 +3530,7 @@ void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, @@ -3542,7 +3542,7 @@ void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, @@ -3554,7 +3554,7 @@ void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, @@ -3566,7 +3566,7 @@ void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, @@ -3578,7 +3578,7 @@ void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void 
test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, @@ -3591,7 +3591,7 @@ void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, @@ -3604,7 +3604,7 @@ void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, @@ -3616,7 +3616,7 @@ void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, @@ -3628,7 +3628,7 @@ void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, @@ -3640,7 +3640,7 @@ void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, @@ -3652,7 +3652,7 @@ void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, @@ -3665,7 +3665,7 @@ void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, @@ -3678,7 +3678,7 @@ void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, @@ -3690,7 +3690,7 @@ void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, @@ -3702,7 +3702,7 @@ void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, @@ -3714,7 +3714,7 @@ void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, @@ -3727,7 +3727,7 @@ void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, @@ -3740,7 +3740,7 @@ void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, @@ -3752,7 +3752,7 @@ void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, @@ -3764,7 +3764,7 @@ void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, @@ -3776,7 +3776,7 @@ void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, @@ -3788,7 +3788,7 @@ void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, @@ -3800,7 +3800,7 @@ void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, @@ -3812,7 +3812,7 @@ void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, @@ -3824,7 +3824,7 @@ void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, @@ -3837,7 +3837,7 @@ void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, @@ -3850,7 +3850,7 @@ void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, @@ -3862,7 +3862,7 @@ void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, @@ -3874,7 +3874,7 @@ void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, @@ -3886,7 +3886,7 @@ void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, @@ -3899,7 +3899,7 @@ void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, @@ -3911,7 +3911,7 @@ void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, @@ -3923,7 +3923,7 @@ void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, // 
CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, @@ -3935,7 +3935,7 @@ void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, @@ -3947,7 +3947,7 @@ void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, @@ -3960,7 +3960,7 @@ void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, @@ -3972,7 +3972,7 @@ void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, @@ -3984,7 +3984,7 @@ void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, @@ -3996,7 +3996,7 @@ void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, @@ -4008,7 +4008,7 @@ void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, @@ -4020,7 +4020,7 @@ void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, @@ -4032,7 +4032,7 @@ void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, @@ -4044,7 +4044,7 @@ void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t 
*base, @@ -4057,7 +4057,7 @@ void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, @@ -4070,7 +4070,7 @@ void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, @@ -4082,7 +4082,7 @@ void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, @@ -4094,7 +4094,7 @@ void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, @@ -4107,7 +4107,7 @@ void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, @@ -4119,7 +4119,7 @@ void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, @@ -4131,7 +4131,7 @@ void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, @@ -4143,7 +4143,7 @@ void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, @@ -4155,7 +4155,7 @@ void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, @@ -4167,7 +4167,7 @@ void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, @@ -4179,7 +4179,7 @@ void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, @@ -4191,7 +4191,7 @@ void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, @@ -4203,7 +4203,7 @@ void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, @@ -4215,7 +4215,7 @@ void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, @@ -4227,7 +4227,7 @@ void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, @@ -4239,7 +4239,7 @@ void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, @@ -4251,7 +4251,7 @@ void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, @@ -4263,7 +4263,7 @@ void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, @@ -4275,7 +4275,7 @@ void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, @@ -4287,7 +4287,7 @@ void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, @@ -4299,7 +4299,7 @@ void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, @@ -4311,7 +4311,7 @@ void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, @@ -4323,7 +4323,7 @@ void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, @@ -4335,7 +4335,7 @@ void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, @@ -4347,7 +4347,7 @@ void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, @@ -4359,7 +4359,7 @@ void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, @@ -4371,7 +4371,7 @@ void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t 
bindex, @@ -4383,7 +4383,7 @@ void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, @@ -4395,7 +4395,7 @@ void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, @@ -4407,7 +4407,7 @@ void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, @@ -4419,7 +4419,7 @@ void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, @@ -4431,7 +4431,7 @@ void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, @@ -4443,7 +4443,7 @@ void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, @@ -4455,7 +4455,7 @@ void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, @@ -4467,7 +4467,7 @@ void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, @@ -4479,7 +4479,7 @@ void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, @@ -4491,7 +4491,7 @@ void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, @@ -4503,7 +4503,7 @@ void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, @@ -4515,7 +4515,7 @@ void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, @@ -4527,7 +4527,7 @@ void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, @@ -4539,7 +4539,7 @@ void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, @@ -4551,7 +4551,7 @@ void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, @@ -4563,7 +4563,7 @@ void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, @@ -4575,7 +4575,7 @@ void test_vsoxei64_v_f64m1_m(vbool64_t mask, double 
*base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, @@ -4587,7 +4587,7 @@ void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, @@ -4599,7 +4599,7 @@ void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c index c88d491..389f4b0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c @@ -447,7 +447,7 @@ vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -457,7 +457,7 @@ vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { @@ -467,7 +467,7 @@ vint8mf8_t 
test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -477,7 +477,7 @@ vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { @@ -487,7 +487,7 @@ vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -497,7 +497,7 @@ vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { @@ -507,7 +507,7 @@ vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -517,7 +517,7 @@ vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { @@ -527,7 +527,7 @@ vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -537,7 +537,7 @@ vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { @@ -547,7 +547,7 @@ vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -557,7 +557,7 @@ vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { @@ -567,7 +567,7 @@ vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t 
maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -577,7 +577,7 @@ vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { @@ -587,7 +587,7 @@ vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -597,7 +597,7 @@ vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { @@ -607,7 +607,7 @@ vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -617,7 +617,7 @@ vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { @@ -627,7 +627,7 @@ vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -637,7 +637,7 @@ vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { @@ -647,7 +647,7 @@ vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -657,7 +657,7 @@ vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { @@ -667,7 +667,7 @@ vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -677,7 +677,7 @@ vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { @@ -687,7 +687,7 @@ vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -697,7 +697,7 @@ vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { @@ -707,7 +707,7 @@ vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -717,7 +717,7 @@ vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { @@ -727,7 +727,7 @@ vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -737,7 
+737,7 @@ vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { @@ -747,7 +747,7 @@ vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -757,7 +757,7 @@ vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { @@ -767,7 +767,7 @@ vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -777,7 +777,7 @@ vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { @@ -787,7 +787,7 @@ vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -797,7 +797,7 @@ vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { @@ -807,7 +807,7 @@ vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -817,7 +817,7 @@ vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { @@ -827,7 +827,7 @@ vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -837,7 +837,7 @@ vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
@@ -847,7 +847,7 @@ vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
 //
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -857,7 +857,7 @@ vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
@@ -867,7 +867,7 @@ vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
 //
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -877,7 +877,7 @@ vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
 //
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
index 71c9973..afc25dc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
@@ -447,7 +447,7 @@ vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -457,7 +457,7 @@ vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { @@ -467,7 +467,7 @@ vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -477,7 +477,7 @@ vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { @@ -487,7 +487,7 @@ vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -497,7 +497,7 @@ vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { @@ -507,7 +507,7 @@ vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, 
vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -517,7 +517,7 @@ vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { @@ -527,7 +527,7 @@ vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -537,7 +537,7 @@ vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { @@ -547,7 +547,7 @@ vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -557,7 +557,7 @@ vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { @@ -567,7 +567,7 @@ vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -577,7 +577,7 @@ vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { @@ -587,7 +587,7 @@ vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -597,7 +597,7 @@ vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { @@ -607,7 +607,7 @@ vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -617,7 +617,7 @@ vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { @@ -627,7 +627,7 @@ vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -637,7 +637,7 @@ vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { @@ -647,7 +647,7 @@ vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -657,7 +657,7 @@ vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { @@ -667,7 +667,7 @@ vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -677,7 +677,7 @@ vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { @@ -687,7 +687,7 @@ vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -697,7 +697,7 @@ vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { @@ -707,7 +707,7 @@ vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -717,7 +717,7 @@ vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { @@ -727,7 +727,7 @@ vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -737,7 +737,7 @@ vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { @@ -747,7 +747,7 @@ vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -757,7 +757,7 @@ vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { @@ -767,7 +767,7 @@ vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t 
maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -777,7 +777,7 @@ vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { @@ -787,7 +787,7 @@ vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -797,7 +797,7 @@ vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { @@ -807,7 +807,7 @@ vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -817,7 +817,7 @@ vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { @@ -827,7 +827,7 @@ vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -837,7 +837,7 @@ vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { @@ -847,7 +847,7 @@ vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -857,7 +857,7 @@ vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { @@ -867,7 +867,7 @@ vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -877,7 +877,7 @@ vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8 // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c
index 2073ff6..f64b6d1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c
@@ -9,7 +9,7 @@
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value,
@@ -21,7 +21,7 @@ void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value,
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value,
@@ -33,7 +33,7 @@ void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value,
 // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value,
@@ -45,7 +45,7 @@ void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value,
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value,
@@ -57,7 +57,7 @@ void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value,
 // CHECK-RV64-LABEL: @test_vsse8_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value,
@@ -69,7 +69,7 @@ void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value,
 // CHECK-RV64-LABEL:
@test_vsse8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, @@ -81,7 +81,7 @@ void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, // CHECK-RV64-LABEL: @test_vsse8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, @@ -93,7 +93,7 @@ void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, @@ -105,7 +105,7 @@ void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, @@ -117,7 +117,7 @@ void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, // CHECK-RV64-LABEL: @test_vsse16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, @@ -129,7 +129,7 @@ void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsse16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, @@ -141,7 +141,7 @@ void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t 
bstride, vint16m2_t value, // CHECK-RV64-LABEL: @test_vsse16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, @@ -153,7 +153,7 @@ void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, // CHECK-RV64-LABEL: @test_vsse16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, @@ -165,7 +165,7 @@ void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, @@ -177,7 +177,7 @@ void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, // CHECK-RV64-LABEL: @test_vsse32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, @@ -189,7 +189,7 @@ void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, // CHECK-RV64-LABEL: @test_vsse32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, @@ -201,7 +201,7 @@ void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, // CHECK-RV64-LABEL: @test_vsse32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, @@ 
-213,7 +213,7 @@ void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, // CHECK-RV64-LABEL: @test_vsse32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, @@ -225,7 +225,7 @@ void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, // CHECK-RV64-LABEL: @test_vsse64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, @@ -237,7 +237,7 @@ void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, // CHECK-RV64-LABEL: @test_vsse64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value, @@ -249,7 +249,7 @@ void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value, // CHECK-RV64-LABEL: @test_vsse64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, @@ -261,7 +261,7 @@ void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, // CHECK-RV64-LABEL: @test_vsse64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, @@ -273,7 +273,7 @@ void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void 
test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, @@ -285,7 +285,7 @@ void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, @@ -297,7 +297,7 @@ void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, @@ -309,7 +309,7 @@ void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, // CHECK-RV64-LABEL: @test_vsse8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, @@ -321,7 +321,7 @@ void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, // CHECK-RV64-LABEL: @test_vsse8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, @@ -333,7 +333,7 @@ void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, // CHECK-RV64-LABEL: @test_vsse8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, @@ -345,7 +345,7 @@ void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, // CHECK-RV64-LABEL: @test_vsse8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, @@ -357,7 +357,7 @@ void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, @@ -369,7 +369,7 @@ void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, @@ -381,7 +381,7 @@ void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, // CHECK-RV64-LABEL: @test_vsse16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, @@ -393,7 +393,7 @@ void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, // CHECK-RV64-LABEL: @test_vsse16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, @@ -405,7 +405,7 @@ void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, // CHECK-RV64-LABEL: @test_vsse16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, @@ -417,7 +417,7 @@ void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, // CHECK-RV64-LABEL: @test_vsse16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, @@ -429,7 +429,7 @@ void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, @@ -441,7 +441,7 @@ void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, // CHECK-RV64-LABEL: @test_vsse32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, @@ -453,7 +453,7 @@ void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, // CHECK-RV64-LABEL: @test_vsse32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, @@ -465,7 +465,7 @@ void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, // CHECK-RV64-LABEL: @test_vsse32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, @@ -477,7 +477,7 @@ void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, // CHECK-RV64-LABEL: @test_vsse32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, @@ -489,7 +489,7 @@ void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, // CHECK-RV64-LABEL: @test_vsse64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64( [[VALUE:%.*]], 
* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, @@ -501,7 +501,7 @@ void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, // CHECK-RV64-LABEL: @test_vsse64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, @@ -513,7 +513,7 @@ void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, // CHECK-RV64-LABEL: @test_vsse64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, @@ -525,7 +525,7 @@ void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, // CHECK-RV64-LABEL: @test_vsse64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, @@ -537,7 +537,7 @@ void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value, @@ -549,7 +549,7 @@ void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value, // CHECK-RV64-LABEL: @test_vsse32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value, @@ -561,7 +561,7 @@ void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsse32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value, @@ -573,7 +573,7 @@ void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsse32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value, @@ -585,7 +585,7 @@ void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsse32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value, @@ -597,7 +597,7 @@ void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value, // CHECK-RV64-LABEL: @test_vsse64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value, @@ -609,7 +609,7 @@ void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value, // CHECK-RV64-LABEL: @test_vsse64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value, @@ -621,7 +621,7 @@ void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value, // CHECK-RV64-LABEL: @test_vsse64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value, @@ -633,7 +633,7 @@ void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value, // CHECK-RV64-LABEL: 
@test_vsse64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value, @@ -645,7 +645,7 @@ void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value, // CHECK-RV64-LABEL: @test_vsse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, @@ -657,7 +657,7 @@ void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, @@ -669,7 +669,7 @@ void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, @@ -681,7 +681,7 @@ void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, @@ -693,7 +693,7 @@ void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, @@ -705,7 +705,7 @@ void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, @@ -717,7 +717,7 @@ void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, @@ -729,7 +729,7 @@ void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, @@ -741,7 +741,7 @@ void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, @@ -753,7 +753,7 @@ void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, @@ -765,7 +765,7 @@ void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, @@ -777,7 +777,7 @@ void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, @@ -789,7 +789,7 @@ void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, @@ -801,7 +801,7 @@ void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, @@ -813,7 +813,7 @@ void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, @@ -825,7 +825,7 @@ void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m2_m(vbool16_t 
mask, int32_t *base, ptrdiff_t bstride, @@ -837,7 +837,7 @@ void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, @@ -849,7 +849,7 @@ void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, @@ -861,7 +861,7 @@ void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, @@ -873,7 +873,7 @@ void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, @@ -885,7 +885,7 @@ void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, @@ -897,7 +897,7 @@ void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64( 
[[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, @@ -909,7 +909,7 @@ void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, @@ -921,7 +921,7 @@ void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, @@ -933,7 +933,7 @@ void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, @@ -945,7 +945,7 @@ void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, @@ -957,7 +957,7 @@ void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, @@ -969,7 +969,7 @@ void 
test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, @@ -981,7 +981,7 @@ void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride, @@ -993,7 +993,7 @@ void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, @@ -1005,7 +1005,7 @@ void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, @@ -1017,7 +1017,7 @@ void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, @@ -1029,7 +1029,7 @@ void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, @@ -1041,7 +1041,7 @@ void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, @@ -1053,7 +1053,7 @@ void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride, @@ -1065,7 +1065,7 @@ void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, @@ -1077,7 +1077,7 @@ void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, @@ -1089,7 +1089,7 @@ void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, @@ -1101,7 +1101,7 @@ void 
test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, @@ -1113,7 +1113,7 @@ void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride, @@ -1125,7 +1125,7 @@ void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, @@ -1137,7 +1137,7 @@ void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, @@ -1149,7 +1149,7 @@ void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, @@ -1161,7 +1161,7 @@ void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride,
@@ -1173,7 +1173,7 @@ void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride,
 // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
@@ -1185,7 +1185,7 @@ void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride,
@@ -1197,7 +1197,7 @@ void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride,
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride,
@@ -1209,7 +1209,7 @@ void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride,
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride,
@@ -1221,7 +1221,7 @@ void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride,
 // CHECK-RV64-LABEL: @test_vsse32_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride,
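(For orientation, a minimal caller sketch, not part of this patch; the helper name is illustrative. Stores return void, so there is no maskedoff operand: after this change the overloaded masked form simply takes the mask first, followed by the C operands in declaration order, matching the test signatures above.)

#include <riscv_vector.h>

// Hypothetical helper: masked strided store of an f32mf2 vector.
// The call's argument order now matches the reordered builtin:
// mask, base, bstride, value, vl.
void store_f32_strided_masked(vbool64_t mask, float *base,
                              ptrdiff_t bstride, vfloat32mf2_t value,
                              size_t vl) {
  vsse32(mask, base, bstride, value, vl);
}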
test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, @@ -1245,7 +1245,7 @@ void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, @@ -1257,7 +1257,7 @@ void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, @@ -1269,7 +1269,7 @@ void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride, // CHECK-RV64-LABEL: @test_vsse64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c index 4fc3ae2..7b3599b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c @@ -450,7 +450,7 @@ vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -461,7 +461,7 @@ vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, // // 
CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -472,7 +472,7 @@ vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -483,7 +483,7 @@ vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -494,7 +494,7 @@ vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -505,7 +505,7 @@ vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -516,7 +516,7 @@ vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -527,7 +527,7 @@ 
vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -538,7 +538,7 @@ vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -549,7 +549,7 @@ vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -560,7 +560,7 @@ vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -571,7 +571,7 @@ vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -582,7 +582,7 @@ vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, 
vint8m8_t maskedoff, @@ -593,7 +593,7 @@ vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -604,7 +604,7 @@ vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -616,7 +616,7 @@ vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -627,7 +627,7 @@ vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -639,7 +639,7 @@ vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -650,7 +650,7 @@ vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -661,7 +661,7 @@ vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -672,7 +672,7 @@ vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -683,7 +683,7 @@ vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -694,7 +694,7 @@ vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -705,7 +705,7 @@ vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -716,7 +716,7 @@ vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -727,7 +727,7 @@ vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -738,7 +738,7 @@ vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -750,7 +750,7 @@ vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -761,7 +761,7 @@ vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -772,7 +772,7 @@ vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -783,7 +783,7 @@ vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -794,7 +794,7 @@ vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -805,7 +805,7 @@ vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -816,7 +816,7 @@ vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -827,7 +827,7 @@ vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -838,7 +838,7 @@ vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -849,7 +849,7 @@ vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -860,7 +860,7 @@ vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -871,7 +871,7 @@ vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -882,7 +882,7 @@ vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -893,7 +893,7 @@ vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -904,7 +904,7 @@ vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -915,7 +915,7 @@ vint64m4_t test_vssra_vx_i64m4_m(vbool16_t 
mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -926,7 +926,7 @@ vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c index 0428ce7..6c6b9d3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c @@ -450,7 +450,7 @@ vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { // // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -462,7 +462,7 @@ vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -473,7 +473,7 @@ vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -485,7 +485,7 @@ vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -496,7 +496,7 @@ vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -508,7 +508,7 @@ vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -519,7 +519,7 @@ vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -530,7 +530,7 @@ vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -541,7 +541,7 @@ vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -552,7 +552,7 @@ vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -563,7 +563,7 @@ vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -574,7 +574,7 @@ vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -585,7 +585,7 @@ vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -596,7 +596,7 @@ vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -607,7 +607,7 @@ vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -619,7 +619,7 @@ vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // 
CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -630,7 +630,7 @@ vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -642,7 +642,7 @@ vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -653,7 +653,7 @@ vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -665,7 +665,7 @@ vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -676,7 +676,7 @@ vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -688,7 +688,7 @@ vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -699,7 +699,7 @@ vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -711,7 +711,7 @@ vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -722,7 +722,7 @@ vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -734,7 +734,7 @@ vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -745,7 +745,7 @@ vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -757,7 +757,7 @@ vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -768,7 +768,7 @@ vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -780,7 +780,7 @@ vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -791,7 +791,7 @@ vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -803,7 +803,7 @@ vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -814,7 +814,7 @@ vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -826,7 +826,7 @@ vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -837,7 +837,7 @@ vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -849,7 +849,7 @@ vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -860,7 +860,7 @@ vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -872,7 +872,7 @@ vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -883,7 +883,7 @@ vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -895,7 +895,7 @@ vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -906,7 +906,7 @@ vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -918,7 +918,7 @@ vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -929,7 +929,7 @@ vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -941,7 +941,7 @@ vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
index b0bdae2..02ec458 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
@@ -890,7 +890,7 @@ vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -901,7 +901,7 @@ vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -912,7 +912,7 @@ vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -923,7 +923,7 @@ vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -934,7 +934,7 @@ vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -945,7 +945,7 @@ vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]],
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -956,7 +956,7 @@ vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -967,7 +967,7 @@ vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -978,7 +978,7 @@ vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -989,7 +989,7 @@ vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1000,7 +1000,7 @@ vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1011,7 +1011,7 @@ vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1022,7 +1022,7 @@ vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1033,7 +1033,7 @@ vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1044,7 +1044,7 @@ vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1056,7 +1056,7 @@ vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1067,7 +1067,7 @@ vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1079,7 +1079,7 @@ vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1090,7 +1090,7 @@ vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1101,7 +1101,7 @@ vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1112,7 +1112,7 @@ vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1123,7 +1123,7 @@ vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1134,7 +1134,7 @@ vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1145,7 +1145,7 @@ vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t 
maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1156,7 +1156,7 @@ vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1167,7 +1167,7 @@ vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1178,7 +1178,7 @@ vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1190,7 +1190,7 @@ vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1201,7 +1201,7 @@ vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, 
vint32m1_t maskedoff, @@ -1212,7 +1212,7 @@ vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1223,7 +1223,7 @@ vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1234,7 +1234,7 @@ vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1245,7 +1245,7 @@ vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1256,7 +1256,7 @@ vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1267,7 +1267,7 @@ vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1278,7 +1278,7 @@ vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1289,7 +1289,7 @@ vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1300,7 +1300,7 @@ vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1311,7 +1311,7 @@ vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1322,7 +1322,7 @@ vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1333,7 +1333,7 @@ vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1344,7 +1344,7 @@ vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1355,7 +1355,7 @@ vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1366,7 +1366,7 @@ vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1377,7 +1377,7 @@ vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1389,7 +1389,7 @@ vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1400,7 +1400,7 @@ vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1412,7 +1412,7 @@ vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1423,7 +1423,7 @@ vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1435,7 +1435,7 @@ vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1446,7 +1446,7 @@ vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -1457,7 +1457,7 @@ vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -1468,7 +1468,7 @@ vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -1479,7 +1479,7 @@ vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -1490,7 +1490,7 @@ vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -1501,7 +1501,7 @@ vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -1512,7 +1512,7 @@ vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -1523,7 +1523,7 @@ vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -1534,7 +1534,7 @@ vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, // // CHECK-RV64-LABEL: 
@test_vssubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1546,7 +1546,7 @@ vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1558,7 +1558,7 @@ vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1570,7 +1570,7 @@ vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1582,7 +1582,7 @@ vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1594,7 +1594,7 @@ vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t 
test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1605,7 +1605,7 @@ vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1617,7 +1617,7 @@ vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1628,7 +1628,7 @@ vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1640,7 +1640,7 @@ vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1651,7 +1651,7 @@ vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1663,7 +1663,7 @@ vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1674,7 +1674,7 @@ vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1686,7 +1686,7 @@ vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1698,7 +1698,7 @@ vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1710,7 +1710,7 @@ vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1721,7 +1721,7 @@ vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1733,7 +1733,7 @@ vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1744,7 +1744,7 @@ vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1756,7 +1756,7 @@ vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1767,7 +1767,7 @@ vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1779,7 +1779,7 @@ vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1790,7 +1790,7 @@ vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1802,7 +1802,7 @@ vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, 
vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1813,7 +1813,7 @@ vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1825,7 +1825,7 @@ vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1836,7 +1836,7 @@ vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1848,7 +1848,7 @@ vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1859,7 +1859,7 @@ vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -1871,7 +1871,7 @@ vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
index 1ad5d00..51d9848 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -887,7 +887,7 @@ vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -907,7 +907,7 @@ vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 //
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl)
{ @@ -927,7 +927,7 @@ vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -937,7 +937,7 @@ vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -947,7 +947,7 @@ vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -957,7 +957,7 @@ vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -967,7 +967,7 @@ vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -997,7 +997,7 @@ vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -1007,7 +1007,7 @@ vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -1017,7 +1017,7 @@ vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -1027,7 +1027,7 @@ vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1037,7 +1037,7 @@ vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -1047,7 +1047,7 @@ vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1057,7 +1057,7 @@ vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -1087,7 +1087,7 @@ vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16>
@llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1097,7 +1097,7 @@ vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1107,7 +1107,7 @@ vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1127,7 +1127,7 @@ vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1137,7 +1137,7 @@ vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1147,7 +1147,7 @@ vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1157,7 +1157,7 @@ vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1177,7 +1177,7 @@ vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1187,7 +1187,7 @@ vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1197,7 +1197,7 @@ vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t 
// // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1217,7 +1217,7 @@ vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1227,7 +1227,7 @@ vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1237,7 +1237,7 @@ vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1247,7 +1247,7 @@ vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ 
-1307,7 +1307,7 @@ vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1377,7 +1377,7 @@ vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1387,7 +1387,7 @@ vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1397,7 +1397,7 @@ vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1407,7 +1407,7 @@ vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t 
test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1417,7 +1417,7 @@ vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1427,7 +1427,7 @@ vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1447,7 +1447,7 @@ vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1457,7 +1457,7 @@ vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1467,7 +1467,7 @@ vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1477,7 +1477,7 @@ vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1487,7 +1487,7 @@ vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1497,7 +1497,7 @@ vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1517,7 +1517,7 @@ vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1537,7 +1537,7 @@ vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1547,7 +1547,7 @@ vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1557,7 +1557,7 @@ vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1567,7 +1567,7 @@ vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1577,7 +1577,7 
@@ vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1587,7 +1587,7 @@ vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1597,7 +1597,7 @@ vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1607,7 +1607,7 @@ vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1677,7 +1677,7 @@ vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1717,7 +1717,7 @@ vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1727,7 +1727,7 @@ vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1737,7 +1737,7 @@ vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m // 
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
 //
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c
index 45f8dd3..6c829d7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c
@@ -9,7 +9,7 @@
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
@@ -21,7 +21,7 @@ void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
@@ -33,7 +33,7 @@ void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
@@ -45,7 +45,7 @@ void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
@@ -57,7 +57,7 @@ void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
@@ -69,7 +69,7 @@ void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
@@ -81,7 +81,7 @@ void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
@@ -93,7 +93,7 @@ void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
@@ -105,7 +105,7 @@ void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4(
 //
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, @@ -117,7 +117,7 @@ void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, @@ -129,7 +129,7 @@ void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, @@ -141,7 +141,7 @@ void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, @@ -153,7 +153,7 @@ void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, @@ -165,7 +165,7 @@ void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, @@ -177,7 
+177,7 @@ void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, @@ -189,7 +189,7 @@ void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, @@ -201,7 +201,7 @@ void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, @@ -213,7 +213,7 @@ void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, @@ -225,7 +225,7 @@ void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, @@ -237,7 +237,7 @@ void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, @@ -249,7 +249,7 @@ void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, @@ -261,7 +261,7 @@ void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, @@ -273,7 +273,7 @@ void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, @@ -285,7 +285,7 @@ void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, @@ -297,7 +297,7 @@ void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, @@ -309,7 +309,7 @@ void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, @@ -321,7 +321,7 @@ void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, @@ -333,7 +333,7 @@ void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, @@ -345,7 +345,7 @@ void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, @@ -357,7 +357,7 @@ void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, @@ -369,7 +369,7 @@ void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, @@ -381,7 +381,7 @@ void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, @@ -393,7 +393,7 @@ void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, @@ -405,7 +405,7 @@ void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, @@ -417,7 +417,7 @@ void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, @@ -429,7 +429,7 @@ void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, @@ -441,7 +441,7 @@ void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, @@ -453,7 +453,7 @@ void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, @@ -465,7 +465,7 @@ void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, @@ -477,7 +477,7 @@ void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, @@ -489,7 +489,7 @@ void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, @@ -501,7 +501,7 @@ void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, @@ -513,7 +513,7 @@ void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, @@ -525,7 +525,7 @@ void 
test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, @@ -537,7 +537,7 @@ void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, @@ -549,7 +549,7 @@ void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, @@ -561,7 +561,7 @@ void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, @@ -573,7 +573,7 @@ void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, @@ -585,7 +585,7 @@ void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, @@ -597,7 +597,7 @@ void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, @@ -609,7 +609,7 @@ void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, @@ -621,7 +621,7 @@ void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, @@ -633,7 +633,7 @@ void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, @@ -645,7 +645,7 @@ void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, @@ -657,7 +657,7 @@ void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, @@ -669,7 +669,7 @@ void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, @@ -681,7 +681,7 @@ void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, @@ -693,7 +693,7 @@ void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, @@ -705,7 +705,7 @@ void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, @@ -717,7 +717,7 @@ void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, @@ -729,7 +729,7 @@ void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, @@ -741,7 +741,7 @@ void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, @@ -753,7 +753,7 @@ void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, @@ -765,7 +765,7 @@ void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, @@ -777,7 +777,7 @@ void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, @@ -789,7 +789,7 @@ void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, @@ -801,7 +801,7 @@ void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t 
value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, @@ -813,7 +813,7 @@ void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, @@ -825,7 +825,7 @@ void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, @@ -837,7 +837,7 @@ void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, @@ -849,7 +849,7 @@ void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, @@ -861,7 +861,7 @@ void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, @@ -873,7 +873,7 @@ void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, @@ -885,7 +885,7 @@ void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, @@ -897,7 +897,7 @@ void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, @@ -909,7 +909,7 @@ void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, @@ -921,7 +921,7 @@ void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, @@ -933,7 +933,7 @@ void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, @@ -945,7 +945,7 @@ void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, @@ -957,7 +957,7 @@ void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, @@ -969,7 +969,7 @@ void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, @@ -981,7 +981,7 @@ void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, @@ -993,7 +993,7 @@ void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, @@ -1005,7 +1005,7 @@ void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, @@ -1017,7 +1017,7 @@ void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, @@ -1029,7 +1029,7 @@ void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, @@ -1041,7 +1041,7 @@ void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, @@ -1053,7 +1053,7 @@ void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, @@ -1065,7 +1065,7 @@ void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, @@ -1077,7 +1077,7 @@ void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, @@ -1089,7 +1089,7 @@ void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, @@ -1101,7 +1101,7 @@ void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, @@ -1113,7 +1113,7 @@ void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, @@ -1125,7 +1125,7 @@ void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, @@ -1137,7 +1137,7 @@ void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, @@ -1149,7 +1149,7 
@@ void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, @@ -1161,7 +1161,7 @@ void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, @@ -1173,7 +1173,7 @@ void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, @@ -1185,7 +1185,7 @@ void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, @@ -1197,7 +1197,7 @@ void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, @@ -1209,7 +1209,7 @@ void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, @@ -1221,7 +1221,7 @@ void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, @@ -1233,7 +1233,7 @@ void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, @@ -1245,7 +1245,7 @@ void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, @@ -1257,7 +1257,7 @@ void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, @@ -1269,7 +1269,7 @@ void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, @@ -1281,7 +1281,7 @@ void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, @@ -1293,7 +1293,7 @@ void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, @@ -1305,7 +1305,7 @@ void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, @@ -1317,7 +1317,7 @@ void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, @@ -1329,7 +1329,7 @@ void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, @@ -1341,7 +1341,7 @@ void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, @@ -1353,7 +1353,7 @@ void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, @@ -1365,7 +1365,7 @@ void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, @@ -1377,7 +1377,7 @@ void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, @@ -1389,7 +1389,7 @@ void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, @@ -1401,7 +1401,7 @@ void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, @@ -1413,7 +1413,7 @@ void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, @@ -1425,7 +1425,7 @@ void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, @@ -1437,7 +1437,7 @@ void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, @@ -1449,7 +1449,7 @@ void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, @@ -1461,7 +1461,7 @@ void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, @@ -1473,7 +1473,7 @@ void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, @@ -1485,7 +1485,7 @@ void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, @@ -1497,7 +1497,7 @@ void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, @@ -1509,7 +1509,7 @@ void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, @@ -1521,7 +1521,7 @@ void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, @@ -1533,7 +1533,7 @@ void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, @@ -1545,7 +1545,7 @@ void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, @@ -1557,7 +1557,7 @@ void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, @@ -1569,7 +1569,7 @@ void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, @@ -1581,7 +1581,7 @@ void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, @@ -1593,7 +1593,7 @@ void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, @@ -1605,7 +1605,7 @@ void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, @@ -1617,7 +1617,7 @@ void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, @@ -1629,7 +1629,7 @@ void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, @@ -1641,7 +1641,7 @@ void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, @@ -1653,7 +1653,7 @@ void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, @@ -1665,7 +1665,7 @@ void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, @@ -1677,7 +1677,7 @@ void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, @@ -1689,7 +1689,7 @@ void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, @@ -1701,7 +1701,7 @@ void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, @@ -1713,7 +1713,7 @@ void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, @@ -1725,7 +1725,7 @@ void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, @@ -1737,7 +1737,7 @@ void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, @@ -1749,7 +1749,7 @@ void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, @@ -1761,7 +1761,7 @@ void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, @@ -1773,7 +1773,7 @@ void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, @@ -1785,7 +1785,7 @@ void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, @@ -1797,7 +1797,7 @@ void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, @@ -1809,7 +1809,7 @@ void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, @@ -1821,7 +1821,7 @@ void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, @@ -1833,7 +1833,7 @@ void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, @@ -1845,7 +1845,7 @@ void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, @@ -1857,7 +1857,7 @@ void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, @@ -1869,7 +1869,7 @@ void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, @@ -1881,7 +1881,7 @@ void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, @@ -1893,7 +1893,7 @@ void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, @@ -1905,7 +1905,7 @@ void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, @@ -1917,7 +1917,7 @@ void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value, @@ -1929,7 +1929,7 @@ void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, 
vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, @@ -1941,7 +1941,7 @@ void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex, @@ -1953,7 +1953,7 @@ void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, @@ -1965,7 +1965,7 @@ void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, @@ -1977,7 +1977,7 @@ void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, @@ -1989,7 +1989,7 @@ void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void 
test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, @@ -2001,7 +2001,7 @@ void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex, @@ -2013,7 +2013,7 @@ void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, @@ -2025,7 +2025,7 @@ void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, @@ -2037,7 +2037,7 @@ void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, @@ -2049,7 +2049,7 @@ void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, @@ -2061,7 +2061,7 @@ void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex, @@ -2073,7 +2073,7 @@ void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, @@ -2085,7 +2085,7 @@ void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, @@ -2097,7 +2097,7 @@ void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, @@ -2109,7 +2109,7 @@ void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, @@ -2121,7 +2121,7 @@ void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, @@ -2133,7 +2133,7 @@ void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] 
to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, @@ -2145,7 +2145,7 @@ void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, @@ -2157,7 +2157,7 @@ void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex, @@ -2169,7 +2169,7 @@ void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex, @@ -2181,7 +2181,7 @@ void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, @@ -2193,7 +2193,7 @@ void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, @@ -2205,7 +2205,7 @@ void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, // 
CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex, @@ -2217,7 +2217,7 @@ void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, @@ -2229,7 +2229,7 @@ void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, @@ -2241,7 +2241,7 @@ void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, @@ -2253,7 +2253,7 @@ void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, @@ -2265,7 +2265,7 @@ void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void 
test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, @@ -2277,7 +2277,7 @@ void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, @@ -2289,7 +2289,7 @@ void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value, // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, @@ -2301,7 +2301,7 @@ void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value, // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, @@ -2313,7 +2313,7 @@ void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, @@ -2325,7 +2325,7 @@ void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, @@ -2337,7 +2337,7 @@ void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, @@ -2349,7 +2349,7 @@ void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, @@ -2361,7 +2361,7 @@ void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, @@ -2373,7 +2373,7 @@ void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, @@ -2385,7 +2385,7 @@ void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, @@ -2397,7 +2397,7 @@ void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, @@ -2409,7 +2409,7 @@ void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, @@ -2421,7 +2421,7 @@ void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, @@ -2433,7 +2433,7 @@ void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, @@ -2445,7 +2445,7 @@ void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, @@ -2457,7 +2457,7 @@ void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, @@ -2469,7 +2469,7 @@ void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, @@ -2481,7 +2481,7 @@ void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, @@ -2493,7 +2493,7 @@ void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, @@ -2505,7 +2505,7 @@ void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, @@ -2517,7 +2517,7 @@ void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, @@ -2529,7 +2529,7 @@ void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, @@ -2541,7 +2541,7 @@ void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, @@ -2553,7 +2553,7 @@ void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, @@ -2565,7 +2565,7 @@ void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, @@ -2577,7 +2577,7 @@ void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, @@ -2589,7 +2589,7 @@ void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, @@ -2601,7 +2601,7 @@ void 
test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, @@ -2613,7 +2613,7 @@ void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, @@ -2625,7 +2625,7 @@ void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, @@ -2637,7 +2637,7 @@ void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, @@ -2650,7 +2650,7 @@ void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, @@ -2663,7 +2663,7 @@ void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, @@ -2675,7 +2675,7 @@ void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, @@ -2687,7 +2687,7 @@ void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, @@ -2699,7 +2699,7 @@ void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, @@ -2711,7 +2711,7 @@ void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, @@ -2724,7 +2724,7 @@ void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t 
*base, vuint32m1_t bindex, @@ -2736,7 +2736,7 @@ void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, @@ -2748,7 +2748,7 @@ void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, @@ -2760,7 +2760,7 @@ void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, @@ -2772,7 +2772,7 @@ void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, @@ -2784,7 +2784,7 @@ void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, @@ -2796,7 +2796,7 @@ void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, @@ -2808,7 +2808,7 @@ void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, @@ -2820,7 +2820,7 @@ void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, @@ -2832,7 +2832,7 @@ void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, @@ -2844,7 +2844,7 @@ void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, @@ -2856,7 +2856,7 @@ void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, @@ -2868,7 +2868,7 @@ void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, @@ -2880,7 +2880,7 @@ void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -2893,7 +2893,7 @@ void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, @@ -2905,7 +2905,7 @@ void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, @@ -2917,7 +2917,7 @@ void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, @@ -2929,7 +2929,7 @@ void test_vsuxei16_v_i32m4_m(vbool8_t 
mask, int32_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, @@ -2941,7 +2941,7 @@ void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -2954,7 +2954,7 @@ void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, @@ -2966,7 +2966,7 @@ void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, @@ -2978,7 +2978,7 @@ void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, @@ -2990,7 +2990,7 @@ void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, @@ -3002,7 +3002,7 @@ void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, @@ -3014,7 +3014,7 @@ void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, @@ -3026,7 +3026,7 @@ void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, @@ -3038,7 +3038,7 @@ void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, @@ -3050,7 +3050,7 @@ void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // 
void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, @@ -3062,7 +3062,7 @@ void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, @@ -3074,7 +3074,7 @@ void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, @@ -3086,7 +3086,7 @@ void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, @@ -3098,7 +3098,7 @@ void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, @@ -3110,7 +3110,7 @@ void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, @@ -3122,7 +3122,7 @@ void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, @@ -3134,7 +3134,7 @@ void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, @@ -3146,7 +3146,7 @@ void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, @@ -3158,7 +3158,7 @@ void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, @@ -3170,7 +3170,7 @@ void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, @@ -3182,7 +3182,7 @@ void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, @@ -3194,7 +3194,7 @@ void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, @@ -3206,7 +3206,7 @@ void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, @@ -3218,7 +3218,7 @@ void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, @@ -3230,7 +3230,7 @@ void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, @@ -3242,7 +3242,7 @@ void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, @@ -3254,7 
+3254,7 @@ void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, @@ -3266,7 +3266,7 @@ void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, @@ -3278,7 +3278,7 @@ void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, @@ -3290,7 +3290,7 @@ void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, @@ -3302,7 +3302,7 @@ void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, @@ -3314,7 +3314,7 @@ void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, @@ -3326,7 +3326,7 @@ void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, @@ -3338,7 +3338,7 @@ void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, @@ -3350,7 +3350,7 @@ void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, @@ -3362,7 +3362,7 @@ void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, @@ -3374,7 +3374,7 @@ void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, @@ -3386,7 +3386,7 @@ void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, @@ -3398,7 +3398,7 @@ void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, @@ -3410,7 +3410,7 @@ void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, @@ -3422,7 +3422,7 @@ void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, @@ -3434,7 +3434,7 @@ void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, @@ -3446,7 +3446,7 @@ void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, // CHECK-RV64-LABEL: 
@test_vsuxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, @@ -3458,7 +3458,7 @@ void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, @@ -3470,7 +3470,7 @@ void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, @@ -3482,7 +3482,7 @@ void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, @@ -3494,7 +3494,7 @@ void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, @@ -3506,7 +3506,7 @@ void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, @@ -3518,7 +3518,7 @@ void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, @@ -3530,7 +3530,7 @@ void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, @@ -3542,7 +3542,7 @@ void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, @@ -3554,7 +3554,7 @@ void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, @@ -3566,7 +3566,7 @@ void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t 
*base, vuint8m4_t bindex,
@@ -3578,7 +3578,7 @@ void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
@@ -3591,7 +3591,7 @@ void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
@@ -3604,7 +3604,7 @@ void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
@@ -3616,7 +3616,7 @@ void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
@@ -3628,7 +3628,7 @@ void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
@@ -3640,7 +3640,7 @@ void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
@@ -3652,7 +3652,7 @@ void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
@@ -3665,7 +3665,7 @@ void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
@@ -3678,7 +3678,7 @@ void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
@@ -3690,7 +3690,7 @@ void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
@@ -3702,7 +3702,7 @@ void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
@@ -3714,7 +3714,7 @@ void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
@@ -3727,7 +3727,7 @@ void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
@@ -3740,7 +3740,7 @@ void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
@@ -3752,7 +3752,7 @@ void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
@@ -3764,7 +3764,7 @@ void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
@@ -3776,7 +3776,7 @@ void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
@@ -3788,7 +3788,7 @@ void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
@@ -3800,7 +3800,7 @@ void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
@@ -3812,7 +3812,7 @@ void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
@@ -3824,7 +3824,7 @@ void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
@@ -3837,7 +3837,7 @@ void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
@@ -3850,7 +3850,7 @@ void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
@@ -3862,7 +3862,7 @@ void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
@@ -3874,7 +3874,7 @@ void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
@@ -3886,7 +3886,7 @@ void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
@@ -3899,7 +3899,7 @@ void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
@@ -3911,7 +3911,7 @@ void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
@@ -3923,7 +3923,7 @@ void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
@@ -3935,7 +3935,7 @@ void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
@@ -3947,7 +3947,7 @@ void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
@@ -3960,7 +3960,7 @@ void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
@@ -3972,7 +3972,7 @@ void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
@@ -3984,7 +3984,7 @@ void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
@@ -3996,7 +3996,7 @@ void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
@@ -4008,7 +4008,7 @@ void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
@@ -4020,7 +4020,7 @@ void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
@@ -4032,7 +4032,7 @@ void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
@@ -4044,7 +4044,7 @@ void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
@@ -4057,7 +4057,7 @@ void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
@@ -4070,7 +4070,7 @@ void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
@@ -4082,7 +4082,7 @@ void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
@@ -4094,7 +4094,7 @@ void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
@@ -4107,7 +4107,7 @@ void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
@@ -4119,7 +4119,7 @@ void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
@@ -4131,7 +4131,7 @@ void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
@@ -4143,7 +4143,7 @@ void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
@@ -4155,7 +4155,7 @@ void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
@@ -4167,7 +4167,7 @@ void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
@@ -4179,7 +4179,7 @@ void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
@@ -4191,7 +4191,7 @@ void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
@@ -4203,7 +4203,7 @@ void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
@@ -4215,7 +4215,7 @@ void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
@@ -4227,7 +4227,7 @@ void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
@@ -4239,7 +4239,7 @@ void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
@@ -4251,7 +4251,7 @@ void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
@@ -4263,7 +4263,7 @@ void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
@@ -4275,7 +4275,7 @@ void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
@@ -4287,7 +4287,7 @@ void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
@@ -4299,7 +4299,7 @@ void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
@@ -4311,7 +4311,7 @@ void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
@@ -4323,7 +4323,7 @@ void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
@@ -4335,7 +4335,7 @@ void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
@@ -4347,7 +4347,7 @@ void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
@@ -4359,7 +4359,7 @@ void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
@@ -4371,7 +4371,7 @@ void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
@@ -4383,7 +4383,7 @@ void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
@@ -4395,7 +4395,7 @@ void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
@@ -4407,7 +4407,7 @@ void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
@@ -4419,7 +4419,7 @@ void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
@@ -4431,7 +4431,7 @@ void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
@@ -4443,7 +4443,7 @@ void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
@@ -4455,7 +4455,7 @@ void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
@@ -4467,7 +4467,7 @@ void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
@@ -4479,7 +4479,7 @@ void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
@@ -4491,7 +4491,7 @@ void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
@@ -4503,7 +4503,7 @@ void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
@@ -4515,7 +4515,7 @@ void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
@@ -4527,7 +4527,7 @@ void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
@@ -4539,7 +4539,7 @@ void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
@@ -4551,7 +4551,7 @@ void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
@@ -4563,7 +4563,7 @@ void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
@@ -4575,7 +4575,7 @@ void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
@@ -4587,7 +4587,7 @@ void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
@@ -4599,7 +4599,7 @@ void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
 void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
index 7b83551..62c5076 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
@@ -1215,7 +1215,7 @@ vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1226,7 +1226,7 @@ vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1237,7 +1237,7 @@ vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1248,7 +1248,7 @@ vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1259,7 +1259,7 @@ vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1270,7 +1270,7 @@ vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1281,7 +1281,7 @@ vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1292,7 +1292,7 @@ vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1303,7 +1303,7 @@ vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1314,7 +1314,7 @@ vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1325,7 +1325,7 @@ vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1336,7 +1336,7 @@ vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1347,7 +1347,7 @@ vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1358,7 +1358,7 @@ vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1369,7 +1369,7 @@ vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1380,7 +1380,7 @@ vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1391,7 +1391,7 @@ vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1402,7 +1402,7 @@ vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1413,7 +1413,7 @@ vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1424,7 +1424,7 @@ vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -1435,7 +1435,7 @@ vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1446,7 +1446,7 @@ vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1457,7 +1457,7 @@ vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1468,7 +1468,7 @@ vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -1479,7 +1479,7 @@ vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1491,7 +1491,7 @@ vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1502,7 +1502,7 @@ vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1514,7 +1514,7 @@ vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -1525,7 +1525,7 @@ vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -1536,7 +1536,7 @@ vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -1547,7 +1547,7 @@ vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -1558,7 +1558,7 @@ vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -1569,7 +1569,7 @@ vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -1580,7 +1580,7 @@ vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -1591,7 +1591,7 @@ vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -1602,7 +1602,7 @@ vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -1613,7 +1613,7 @@ vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -1624,7 +1624,7 @@ vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -1635,7 +1635,7 @@ vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -1646,7 +1646,7 @@ vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -1657,7 +1657,7 @@ vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -1668,7 +1668,7 @@ vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -1679,7 +1679,7 @@ vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -1690,7 +1690,7 @@ vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -1701,7 +1701,7 @@ vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1712,7 +1712,7 @@ vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1723,7 +1723,7 @@ vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1734,7 +1734,7 @@ vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1745,7 +1745,7 @@ vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1756,7 +1756,7 @@ vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1767,7 +1767,7 @@ vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1778,7 +1778,7 @@ vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1789,7 +1789,7 @@ vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1800,7 +1800,7 @@ vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1811,7 +1811,7 @@ vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1822,7 +1822,7 @@ vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1833,7 +1833,7 @@ vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1844,7 +1844,7 @@ vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1855,7 +1855,7 @@ vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1866,7 +1866,7 @@ vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1877,7 +1877,7 @@ vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -1889,7 +1889,7 @@ vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -1900,7 +1900,7 @@ vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -1912,7 +1912,7 @@ vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -1923,7 +1923,7 @@ vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -1935,7 +1935,7 @@ vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -1946,7 +1946,7 @@ vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -1958,7 +1958,7 @@ vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -1969,7 +1969,7 @@ vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -1981,7 +1981,7 @@ vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -1992,7 +1992,7 @@ vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2004,7 +2004,7 @@ vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2015,7 +2015,7 @@ vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2026,7 +2026,7 @@ vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2037,7 +2037,7 @@ vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2048,7 +2048,7 @@ vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2059,7 +2059,7 @@ vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2070,7 +2070,7 @@ vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2081,7 +2081,7 @@ vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2092,7 +2092,7 @@ vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2103,7 +2103,7 @@ vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2114,7 +2114,7 @@ vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2125,7 +2125,7 @@ vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2136,7 +2136,7 @@ vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2147,7 +2147,7 @@ vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2159,7 +2159,7 @@ vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2171,7 +2171,7 @@ vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2183,7 +2183,7 @@ vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2195,7 +2195,7 @@ vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -2207,7 +2207,7 @@ vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -2218,7 +2218,7 @@ vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -2230,7 +2230,7 @@ vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -2241,7 +2241,7 @@ vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -2253,7 +2253,7 @@ vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -2264,7 +2264,7 @@ vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -2276,7 +2276,7 @@ vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -2287,7 +2287,7 @@ vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -2299,7 +2299,7 @@ vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -2310,7 +2310,7 @@ vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -2322,7 +2322,7 @@ vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -2333,7 +2333,7 @@ vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -2345,7 +2345,7 @@ vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -2356,7 +2356,7 @@ vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -2368,7 +2368,7 @@ vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -2379,7 +2379,7 @@ vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -2391,7 +2391,7 @@ vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -2402,7 +2402,7 @@ vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -2414,7 +2414,7 @@ vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -2425,7 +2425,7 @@ vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -2437,7 +2437,7 @@ vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -2448,7 +2448,7 @@ vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -2460,7 +2460,7 @@ vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -2471,7 +2471,7 @@ vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2483,7 +2483,7 @@ vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2494,7 +2494,7 @@ vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2506,7 +2506,7 @@ vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2517,7 +2517,7 @@ vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -2529,7 +2529,7 @@ vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -2540,7 +2540,7 @@ vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -2552,7 +2552,7 @@ vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
index 189f0fb..1166db5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
@@ -307,7 +307,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { @@ -317,7 +317,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vi // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { @@ -327,7 +327,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vi // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { @@ -337,7 +337,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8 // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { @@ -347,7 +347,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { @@ -357,7 +357,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { @@ -367,7 +367,7 @@ vint16m8_t 
test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { @@ -377,7 +377,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { @@ -387,7 +387,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { @@ -397,7 +397,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vu // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { @@ -407,7 +407,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { @@ -417,7 +417,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { @@ -427,7 +427,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { @@ -437,7 +437,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vi // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { @@ -447,7 +447,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint1 // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { @@ -457,7 +457,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint1 // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { @@ -467,7 +467,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16 // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { @@ 
-477,7 +477,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16 // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { @@ -487,7 +487,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { @@ -497,7 +497,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vu // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { @@ -507,7 +507,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vu // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { @@ -517,7 +517,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { @@ -527,7 +527,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { @@ -537,7 +537,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint3 // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { @@ -547,7 +547,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint3 // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { @@ -557,7 +557,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint3 // // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { @@ -567,7 +567,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32 // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { @@ -577,7 +577,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vu // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_m 
(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { @@ -587,7 +587,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vu // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { @@ -597,7 +597,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vu // // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c index 20c64de..ebf63d8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c @@ -1162,7 +1162,7 @@ vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1173,7 +1173,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1, @@ -1184,7 +1184,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -1195,7 +1195,7 @@ 
vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1, @@ -1206,7 +1206,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, @@ -1217,7 +1217,7 @@ vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, @@ -1228,7 +1228,7 @@ vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1, @@ -1239,7 +1239,7 @@ vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, @@ -1250,7 +1250,7 @@ vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1, @@ -1261,7 +1261,7 @@ vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1, @@ -1272,7 +1272,7 @@ vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1, @@ -1283,7 +1283,7 @@ vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1, @@ -1294,7 +1294,7 @@ vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1306,7 +1306,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1317,7 +1317,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -1328,7 +1328,7 @@ vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1, @@ -1339,7 +1339,7 @@ vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -1350,7 +1350,7 @@ vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1, @@ -1361,7 +1361,7 @@ vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1, @@ -1372,7 +1372,7 @@ vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1, @@ -1383,7 +1383,7 @@ vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1, // // CHECK-RV64-LABEL: 
@test_vwmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1, @@ -1394,7 +1394,7 @@ vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1, @@ -1405,7 +1405,7 @@ vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -1416,7 +1416,7 @@ vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1, @@ -1427,7 +1427,7 @@ vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -1438,7 +1438,7 @@ vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1, @@ -1449,7 +1449,7 @@ vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -1460,7 +1460,7 @@ vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, @@ -1471,7 +1471,7 @@ vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1, @@ -1482,7 +1482,7 @@ vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1, // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, @@ -1493,7 +1493,7 @@ vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, @@ -1505,7 +1505,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, @@ -1516,7 +1516,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, @@ -1528,7 +1528,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, @@ -1539,7 +1539,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, @@ -1551,7 +1551,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, @@ -1562,7 +1562,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, @@ -1573,7 +1573,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1, @@ -1584,7 +1584,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, @@ -1595,7 +1595,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1, @@ -1606,7 +1606,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, @@ -1617,7 +1617,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1, @@ -1628,7 +1628,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, @@ -1640,7 +1640,7 @@ 
vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, @@ -1652,7 +1652,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, @@ -1664,7 +1664,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, @@ -1675,7 +1675,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, @@ -1687,7 +1687,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, @@ -1698,7 +1698,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
@@ -1710,7 +1710,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
@@ -1721,7 +1721,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
@@ -1733,7 +1733,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
@@ -1744,7 +1744,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
@@ -1756,7 +1756,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
@@ -1767,7 +1767,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
@@ -1779,7 +1779,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
@@ -1790,7 +1790,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
@@ -1802,7 +1802,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
@@ -1813,7 +1813,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
@@ -1825,7 +1825,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
@@ -1836,7 +1836,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1848,7 +1848,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1859,7 +1859,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -1871,7 +1871,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -1882,7 +1882,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
@@ -1894,7 +1894,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
@@ -1905,7 +1905,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
@@ -1916,7 +1916,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
@@ -1927,7 +1927,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
@@ -1938,7 +1938,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
@@ -1949,7 +1949,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
@@ -1960,7 +1960,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t
@@ -1971,7 +1971,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1983,7 +1983,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1995,7 +1995,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -2007,7 +2007,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
@@ -2018,7 +2018,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -2030,7 +2030,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
@@ -2041,7 +2041,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
@@ -2053,7 +2053,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
@@ -2064,7 +2064,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
@@ -2076,7 +2076,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
@@ -2087,7 +2087,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -2099,7 +2099,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
@@ -2110,7 +2110,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -2122,7 +2122,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
@@ -2133,7 +2133,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -2145,7 +2145,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
@@ -2156,7 +2156,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
@@ -2168,7 +2168,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
@@ -2179,7 +2179,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -2190,7 +2190,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -2201,7 +2201,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
@@ -2212,7 +2212,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
@@ -2223,7 +2223,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
@@ -2234,7 +2234,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
@@ -2245,7 +2245,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -2257,7 +2257,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -2268,7 +2268,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -2279,7 +2279,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
@@ -2290,7 +2290,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
@@ -2301,7 +2301,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -2312,7 +2312,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -2323,7 +2323,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -2334,7 +2334,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
 //
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1,
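For reference only (not part of the patch): with the reordered builtins, the overloaded masked vwmacc* calls read in the same order as their C declarations. A minimal sketch, assuming riscv_vector.h and the overloaded names used by these tests; the wrapper function name is hypothetical:

#include <riscv_vector.h>

// Hypothetical wrapper: masked unsigned widening multiply-accumulate.
// The overloaded call now takes operands in C order: (mask, acc, op1, op2, vl),
// as checked by test_vwmaccu_vv_u64m2_m above.
vuint64m2_t widen_mac_u64m2(vbool32_t mask, vuint64m2_t acc,
                            vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}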
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
index 5554be7..e985433 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
@@ -907,7 +907,7 @@ vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -917,7 +917,7 @@ vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -927,7 +927,7 @@ vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -937,7 +937,7 @@ vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -947,7 +947,7 @@ vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8m
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -957,7 +957,7 @@ vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -967,7 +967,7 @@ vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -977,7 +977,7 @@ vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -997,7 +997,7 @@ vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -1007,7 +1007,7 @@ vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -1017,7 +1017,7 @@ vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -1027,7 +1027,7 @@ vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1037,7 +1037,7 @@ vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -1047,7 +1047,7 @@ vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1057,7 +1057,7 @@ vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -1067,7 +1067,7 @@ vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -1087,7 +1087,7 @@ vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -1097,7 +1097,7 @@ vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
@@ -1107,7 +1107,7 @@ vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1117,7 +1117,7 @@ vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1127,7 +1127,7 @@ vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1137,7 +1137,7 @@ vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1147,7 +1147,7 @@ vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1157,7 +1157,7 @@ vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_
 //
 // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_
 //
 // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
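Similarly, for reference only (not part of the patch): a minimal sketch of the masked vwmul overloads after the reordering, assuming riscv_vector.h; the wrapper name is hypothetical and the vector-scalar form is shown:

#include <riscv_vector.h>

// Hypothetical wrapper: masked signed widening multiply, vector-scalar.
// Operand order matches the C declaration checked in these hunks:
// (mask, maskedoff, op1, op2, vl).
vint32m2_t widen_mul_i32m2(vbool16_t mask, vint32m2_t maskedoff,
                           vint16m1_t op1, int16_t op2, size_t vl) {
  return vwmul(mask, maskedoff, op1, op2, vl);
}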
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1177,7 +1177,7 @@ vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_ // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1187,7 +1187,7 @@ vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_ // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1197,7 +1197,7 @@ vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1217,7 +1217,7 @@ vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1227,7 +1227,7 
@@ vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1237,7 +1237,7 @@ vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1247,7 +1247,7 @@ vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8 // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8 // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1307,7 +1307,7 @@ vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vui // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint1 // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint1 // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1377,7 +1377,7 @@ vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint1 // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1387,7 +1387,7 @@ vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint1
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1397,7 +1397,7 @@ vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1407,7 +1407,7 @@ vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1417,7 +1417,7 @@ vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1427,7 +1427,7 @@ vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint3
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1447,7 +1447,7 @@ vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint3
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1457,7 +1457,7 @@ vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint3
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1467,7 +1467,7 @@ vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint3
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1477,7 +1477,7 @@ vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint3
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1487,7 +1487,7 @@ vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint3
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1497,7 +1497,7 @@ vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1507,7 +1507,7 @@ vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1517,7 +1517,7 @@ vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1537,7 +1537,7 @@ vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1547,7 +1547,7 @@ vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1557,7 +1557,7 @@ vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1567,7 +1567,7 @@ vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1577,7 +1577,7 @@ vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1587,7 +1587,7 @@ vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1597,7 +1597,7 @@ vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1607,7 +1607,7 @@ vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1617,7 +1617,7 @@ vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
@@ -1627,7 +1627,7 @@ vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1637,7 +1637,7 @@ vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -1647,7 +1647,7 @@ vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1657,7 +1657,7 @@ vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -1667,7 +1667,7 @@ vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1677,7 +1677,7 @@ vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
@@ -1687,7 +1687,7 @@ vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1697,7 +1697,7 @@ vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
@@ -1707,7 +1707,7 @@ vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1717,7 +1717,7 @@ vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
@@ -1727,7 +1727,7 @@ vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1737,7 +1737,7 @@ vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1767,7 +1767,7 @@ vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1777,7 +1777,7 @@ vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1787,7 +1787,7 @@ vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1797,7 +1797,7 @@ vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4
 //
 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
index 5205bfa..718f688 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
@@ -403,7 +403,7 @@ vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst,
@@ -415,7 +415,7 @@ vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst,
@@ -427,7 +427,7 @@ vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -439,7 +439,7 @@ vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst,
@@ -451,7 +451,7 @@ vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst,
@@ -463,7 +463,7 @@ vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst,
@@ -475,7 +475,7 @@ vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst,
@@ -487,7 +487,7 @@ vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst,
@@ -499,7 +499,7 @@ vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -511,7 +511,7 @@ vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst,
@@ -523,7 +523,7 @@ vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst,
@@ -535,7 +535,7 @@ vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst,
@@ -547,7 +547,7 @@ vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst,
@@ -559,7 +559,7 @@ vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -571,7 +571,7 @@ vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst,
@@ -583,7 +583,7 @@ vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst,
@@ -595,7 +595,7 @@ vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst,
@@ -607,7 +607,7 @@ vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst,
@@ -619,7 +619,7 @@ vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst,
@@ -631,7 +631,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst,
@@ -643,7 +643,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -655,7 +655,7 @@ vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst,
@@ -667,7 +667,7 @@ vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst,
@@ -679,7 +679,7 @@ vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst,
@@ -691,7 +691,7 @@ vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst,
@@ -703,7 +703,7 @@ vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst,
@@ -715,7 +715,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -727,7 +727,7 @@ vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst,
@@ -739,7 +739,7 @@ vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst,
@@ -751,7 +751,7 @@ vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst,
@@ -763,7 +763,7 @@ vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst,
@@ -775,7 +775,7 @@ vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -787,7 +787,7 @@ vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
@@ -799,7 +799,7 @@ vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
@@ -811,7 +811,7 @@ vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
@@ -823,7 +823,7 @@ vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
 //
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
index 649f7e8..7d94594 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
@@ -1215,7 +1215,7 @@ vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1226,7 +1226,7 @@ vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1237,7 +1237,7 @@ vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1248,7 +1248,7 @@ vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -1259,7 +1259,7 @@ vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1270,7 +1270,7 @@ vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1281,7 +1281,7 @@ vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1292,7 +1292,7 @@ vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -1303,7 +1303,7 @@ vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1314,7 +1314,7 @@ vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1325,7 +1325,7 @@ vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1336,7 +1336,7 @@ vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -1347,7 +1347,7 @@ vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1369,7 +1369,7 @@ vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1380,7 +1380,7 @@ vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1391,7 +1391,7 @@ vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1402,7 +1402,7 @@ vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1413,7 +1413,7 @@ vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1424,7 +1424,7 @@ vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1435,7 +1435,7 @@ vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1446,7 +1446,7 @@ vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1457,7 +1457,7 @@ vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1468,7 +1468,7 @@ vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1479,7 +1479,7 @@ vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1491,7 +1491,7 @@ vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1502,7 +1502,7 @@ vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1514,7 +1514,7 @@ vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1525,7 +1525,7 @@ vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1536,7 +1536,7 @@ vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1547,7 +1547,7 @@ vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1558,7 +1558,7 @@ vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1569,7 +1569,7 @@ vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1580,7 +1580,7 @@ vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1591,7 +1591,7 @@ vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1602,7 +1602,7 @@ vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1613,7 +1613,7 @@ vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, 
vint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1624,7 +1624,7 @@ vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1635,7 +1635,7 @@ vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1646,7 +1646,7 @@ vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1657,7 +1657,7 @@ vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1668,7 +1668,7 @@ vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1679,7 +1679,7 @@ vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1690,7 +1690,7 @@ vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1701,7 +1701,7 @@ vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1712,7 +1712,7 @@ vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1723,7 +1723,7 @@ vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1734,7 +1734,7 @@ vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1745,7 +1745,7 @@ vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1756,7 +1756,7 @@ vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1767,7 +1767,7 @@ vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1778,7 +1778,7 @@ vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1789,7 +1789,7 @@ vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1800,7 +1800,7 @@ vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1811,7 +1811,7 @@ vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1822,7 +1822,7 @@ vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1833,7 +1833,7 @@ vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1844,7 +1844,7 @@ vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1855,7 +1855,7 @@ vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1866,7 +1866,7 @@ vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, 
vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1877,7 +1877,7 @@ vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1889,7 +1889,7 @@ vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1900,7 +1900,7 @@ vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1912,7 +1912,7 @@ vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1923,7 +1923,7 @@ vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1935,7 +1935,7 @@ vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1946,7 +1946,7 @@ vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1958,7 +1958,7 @@ vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1969,7 +1969,7 @@ vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1981,7 +1981,7 @@ vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1992,7 +1992,7 @@ vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2004,7 +2004,7 @@ vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2015,7 +2015,7 @@ vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2026,7 +2026,7 @@ vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2037,7 +2037,7 @@ vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2048,7 +2048,7 @@ vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2059,7 +2059,7 @@ vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, // // CHECK-RV64-LABEL: 
@test_vwsubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2070,7 +2070,7 @@ vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2081,7 +2081,7 @@ vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2092,7 +2092,7 @@ vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2103,7 +2103,7 @@ vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2114,7 +2114,7 @@ vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2125,7 +2125,7 @@ vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2136,7 +2136,7 @@ vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2147,7 +2147,7 @@ vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2159,7 +2159,7 @@ vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2171,7 +2171,7 @@ vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2183,7 +2183,7 @@ vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2195,7 +2195,7 @@ vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2207,7 +2207,7 @@ vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2218,7 +2218,7 @@ vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2230,7 +2230,7 @@ vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2241,7 +2241,7 @@ vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2253,7 +2253,7 @@ vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: 
@test_vwsubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2264,7 +2264,7 @@ vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2276,7 +2276,7 @@ vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2287,7 +2287,7 @@ vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2299,7 +2299,7 @@ vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2310,7 +2310,7 @@ vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2322,7 +2322,7 @@ vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2333,7 +2333,7 @@ vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2345,7 +2345,7 @@ vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2356,7 +2356,7 @@ vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2368,7 +2368,7 @@ vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2379,7 +2379,7 @@ vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2391,7 +2391,7 @@ vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2402,7 +2402,7 @@ vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2414,7 +2414,7 @@ vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2425,7 +2425,7 @@ vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2437,7 +2437,7 @@ vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2448,7 +2448,7 @@ vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2460,7 +2460,7 @@ vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2471,7 +2471,7 @@ vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2483,7 +2483,7 @@ vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2494,7 +2494,7 @@ vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2506,7 +2506,7 @@ vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t 
test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2517,7 +2517,7 @@ vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -2529,7 +2529,7 @@ vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -2540,7 +2540,7 @@ vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -2552,7 +2552,7 @@ vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c index 4a48cc5..047093a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c @@ -887,7 +887,7 @@ vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { // // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -897,7 +897,7 @@ vint8mf8_t 
test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -907,7 +907,7 @@ vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t // // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -917,7 +917,7 @@ vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -927,7 +927,7 @@ vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t // // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -937,7 +937,7 @@ vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -947,7 +947,7 @@ vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t // // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -957,7 +957,7 @@ vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -967,7 +967,7 @@ vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, // // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -977,7 +977,7 @@ vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -987,7 +987,7 @@ vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, // // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -997,7 +997,7 @@ vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1007,7 +1007,7 @@ vint8m4_t 
test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, // // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1017,7 +1017,7 @@ vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1027,7 +1027,7 @@ vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, // // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1037,7 +1037,7 @@ vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1047,7 +1047,7 @@ vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1057,7 +1057,7 @@ vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1067,7 +1067,7 @@ vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m // // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1077,7 +1077,7 @@ vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1087,7 +1087,7 @@ vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1097,7 +1097,7 @@ vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1107,7 +1107,7 @@ vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1117,7 +1117,7 @@ vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1127,7 +1127,7 @@ vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1137,7 +1137,7 @@ vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1147,7 +1147,7 @@ vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1157,7 +1157,7 @@ vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1167,7 +1167,7 @@ vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1177,7 +1177,7 @@ vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1187,7 +1187,7 @@ vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1197,7 +1197,7 @@ vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1207,7 +1207,7 @@ vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1217,7 +1217,7 @@ vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1227,7 +1227,7 @@ vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1237,7 +1237,7 @@ vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1247,7 +1247,7 @@ vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1257,7 +1257,7 @@ vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1267,7 +1267,7 @@ vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1277,7 +1277,7 @@ vint64m2_t 
test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1287,7 +1287,7 @@ vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1297,7 +1297,7 @@ vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1307,7 +1307,7 @@ vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1317,7 +1317,7 @@ vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1327,7 +1327,7 @@ vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1337,7 +1337,7 @@ vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1347,7 +1347,7 @@ vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1357,7 +1357,7 @@ vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1367,7 +1367,7 @@ vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1377,7 +1377,7 @@ vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, 
vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1387,7 +1387,7 @@ vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1397,7 +1397,7 @@ vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1407,7 +1407,7 @@ vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1417,7 +1417,7 @@ vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1427,7 +1427,7 @@ vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1437,7 +1437,7 @@ vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1447,7 +1447,7 @@ vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1457,7 +1457,7 @@ vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1467,7 +1467,7 @@ vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1477,7 +1477,7 @@ vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1487,7 +1487,7 @@ vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1497,7 +1497,7 @@ vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1507,7 +1507,7 @@ vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1517,7 +1517,7 @@ vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1527,7 +1527,7 @@ vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1537,7 +1537,7 @@ vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1547,7 +1547,7 @@ vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, 
vuint16m2_t maskedoff, vuint16m2 // // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1557,7 +1557,7 @@ vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1567,7 +1567,7 @@ vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1577,7 +1577,7 @@ vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1587,7 +1587,7 @@ vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1597,7 +1597,7 @@ vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1607,7 +1607,7 @@ vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint // // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1617,7 +1617,7 @@ vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1627,7 +1627,7 @@ vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1637,7 +1637,7 @@ vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1647,7 +1647,7 @@ vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1657,7 +1657,7 @@ vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1667,7 +1667,7 @@ vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1677,7 +1677,7 @@ vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1687,7 +1687,7 @@ vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1697,7 +1697,7 @@ vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1707,7 +1707,7 @@ vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m( 
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1717,7 +1717,7 @@ vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -1727,7 +1727,7 @@ vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1737,7 +1737,7 @@ vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -1747,7 +1747,7 @@ vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 //
 // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1757,7 +1757,7 @@ vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
 //
 // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
index 299025b..05a60c5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
@@ -287,7 +287,7 @@ vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -298,7 +298,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -309,7 +309,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -320,7 +320,7 @@ vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -331,7 +331,7 @@ vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -342,7 +342,7 @@ vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -353,7 +353,7 @@ vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -364,7 +364,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -375,7 +375,7 @@ vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -386,7 +386,7 @@ vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -397,7 +397,7 @@ vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -408,7 +408,7 @@ vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -419,7 +419,7 @@ vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -430,7 +430,7 @@ vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -441,7 +441,7 @@ vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -452,7 +452,7 @@ vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -463,7 +463,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -474,7 +474,7 @@ vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -485,7 +485,7 @@ vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -496,7 +496,7 @@ vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -507,7 +507,7 @@ vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -518,7 +518,7 @@ vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -529,7 +529,7 @@ vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -540,7 +540,7 @@ vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -551,7 +551,7 @@ vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -562,7 +562,7 @@ vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -573,7 +573,7 @@ vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -584,7 +584,7 @@ vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 //
 // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index c71469a..423e547 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -161,11 +161,6 @@ private:
   // The types we use to obtain the specific LLVM intrinsic. They are index of
   // InputTypes. -1 means the return type.
   std::vector<int64_t> IntrinsicTypes;
-  // C/C++ intrinsic operand order is different to builtin operand order. Record
-  // the mapping of InputTypes index.
-  SmallVector<uint8_t, 8> CTypeOrder;
-  // Operands are reordered in the header.
-  bool IsOperandReordered = false;
   uint8_t RISCVExtensions = 0;

 public:
@@ -174,7 +169,6 @@ public:
                bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded,
                bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
-               const std::vector<int64_t> &PermuteOperands,
                StringRef RequiredExtension);
   ~RVVIntrinsic() = default;

@@ -187,8 +181,6 @@ public:
   bool hasManualCodegen() const { return !ManualCodegen.empty(); }
   bool hasAutoDef() const { return HasAutoDef; }
   bool isMask() const { return IsMask; }
-  bool isOperandReordered() const { return IsOperandReordered; }
-  size_t getNumOperand() const { return InputTypes.size(); }
   StringRef getIRName() const { return IRName; }
   StringRef getManualCodegen() const { return ManualCodegen; }
   uint8_t getRISCVExtensions() const { return RISCVExtensions; }
@@ -754,7 +746,6 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
                            bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
-                           const std::vector<int64_t> &PermuteOperands,
                            StringRef RequiredExtension)
     : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
@@ -787,43 +778,6 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
   // Init OutputType and InputTypes
   OutputType = OutInTypes[0];
   InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end());
-  CTypeOrder.resize(InputTypes.size());
-  std::iota(CTypeOrder.begin(), CTypeOrder.end(), 0);
-  // Update default order if we need permutate.
-  if (!PermuteOperands.empty()) {
-    IsOperandReordered = true;
-    // PermuteOperands is nonmasked version index. Update index when there is
-    // maskedoff operand which is always in first operand.
-
-    unsigned Skew = HasMaskedOffOperand ? 1 : 0;
-    for (unsigned i = 0; i < PermuteOperands.size(); ++i) {
-      if (i != PermuteOperands[i])
-        CTypeOrder[i] = PermuteOperands[i] + Skew;
-    }
-    // Verify the result of CTypeOrder has legal value.
-    if (*std::max_element(CTypeOrder.begin(), CTypeOrder.end()) >=
-        CTypeOrder.size())
-      PrintFatalError(
-          "The index of PermuteOperand is bigger than the operand number");
-    SmallSet<uint8_t, 8> Seen;
-    for (auto Idx : CTypeOrder) {
-      if (!Seen.insert(Idx).second)
-        PrintFatalError(
-            "The different element in PermuteOperand could not be equal");
-    }
-  }
-
-  if (IsMask) {
-    if (HasVL)
-      // Builtin type order: op0, op1, ..., mask, vl
-      // C type order: mask, op0, op1, ..., vl
-      std::rotate(CTypeOrder.begin(), CTypeOrder.end() - 2,
-                  CTypeOrder.end() - 1);
-    else
-      // Builtin type order: op0, op1, ..., mask
-      // C type order: mask, op0, op1, ...,
-      std::rotate(CTypeOrder.begin(), CTypeOrder.end() - 1, CTypeOrder.end());
-  }

   // IntrinsicTypes is nonmasked version index. Need to update it
   // if there is maskedoff operand (It is always in first operand).
@@ -853,6 +807,15 @@ void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
     OS << "break;\n";
     return;
   }
+
+  if (isMask()) {
+    if (hasVL()) {
+      OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
+    } else {
+      OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
+    }
+  }
+
   OS << "  IntrinsicTypes = {";
   ListSeparator LS;
   for (const auto &Idx : IntrinsicTypes) {
@@ -865,55 +828,39 @@ void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
   // VL could be i64 or i32, need to encode it in IntrinsicTypes. VL is
   // always last operand.
   if (hasVL())
-    OS << ", Ops[" << getNumOperand() - 1 << "]->getType()";
+    OS << ", Ops.back()->getType()";
   OS << "};\n";
   OS << "  break;\n";
 }

 void RVVIntrinsic::emitIntrinsicMacro(raw_ostream &OS) const {
   OS << "#define " << getName() << "(";
-  if (getNumOperand() > 0) {
+  if (!InputTypes.empty()) {
     ListSeparator LS;
-    for (const auto &I : CTypeOrder)
-      OS << LS << "op" << I;
+    for (unsigned i = 0, e = InputTypes.size(); i != e; ++i)
+      OS << LS << "op" << i;
   }
   OS << ") \\\n";
   OS << "__builtin_rvv_" << getName() << "(";
-  if (getNumOperand() > 0) {
+  if (!InputTypes.empty()) {
     ListSeparator LS;
-    for (unsigned i = 0; i < InputTypes.size(); ++i)
+    for (unsigned i = 0, e = InputTypes.size(); i != e; ++i)
       OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")";
   }
   OS << ")\n";
 }

 void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
-  bool UseAliasAttr = !isMask() && !isOperandReordered();
-  if (UseAliasAttr) {
-    OS << "__attribute__((clang_builtin_alias(";
-    OS << "__builtin_rvv_" << getName() << ")))\n";
-  }
+  OS << "__attribute__((clang_builtin_alias(";
+  OS << "__builtin_rvv_" << getName() << ")))\n";
   OS << OutputType->getTypeStr() << " " << getMangledName() << "(";
   // Emit function arguments
-  if (getNumOperand() > 0) {
+  if (!InputTypes.empty()) {
     ListSeparator LS;
-    for (unsigned i = 0; i < CTypeOrder.size(); ++i)
-      OS << LS << InputTypes[CTypeOrder[i]]->getTypeStr() << " op" << i;
-  }
-  if (UseAliasAttr) {
-    OS << ");\n\n";
-  } else {
-    OS << "){\n";
-    OS << "  return " << getName() << "(";
-    // Emit parameter variables
-    if (getNumOperand() > 0) {
-      ListSeparator LS;
-      for (unsigned i = 0; i < CTypeOrder.size(); ++i)
-        OS << LS << "op" << i;
-    }
-    OS << ");\n";
-    OS << "}\n\n";
+    for (unsigned i = 0; i < InputTypes.size(); ++i)
+      OS << LS << InputTypes[i]->getTypeStr() << " op" << i;
   }
+  OS << ");\n\n";
 }

 //===----------------------------------------------------------------------===//
@@ -1123,8 +1070,6 @@ void RVVEmitter::createRVVIntrinsics(
     StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
     std::vector<int64_t> IntrinsicTypes =
        R->getValueAsListOfInts("IntrinsicTypes");
-    std::vector<int64_t> PermuteOperands =
-        R->getValueAsListOfInts("PermuteOperands");
     StringRef RequiredExtension = R->getValueAsString("RequiredExtension");
     StringRef IRName = R->getValueAsString("IRName");
     StringRef IRNameMask = R->getValueAsString("IRNameMask");
@@ -1144,11 +1089,11 @@ void RVVEmitter::createRVVIntrinsics(
     // Compute Builtin types
     SmallVector<std::string, 8> ProtoMaskSeq = ProtoSeq;
     if (HasMask) {
-      // If HasMask, append 'm' to last operand.
-      ProtoMaskSeq.push_back("m");
       // If HasMaskedOffOperand, insert result type as first input operand.
       if (HasMaskedOffOperand)
        ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, ProtoSeq[0]);
+      // If HasMask, insert 'm' as first input operand.
+      ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m");
     }
     // If HasVL, append 'z' to last operand
     if (HasVL) {
@@ -1170,7 +1115,7 @@ void RVVEmitter::createRVVIntrinsics(
           Name, SuffixStr, MangledName, IRName, HasSideEffects,
           /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
           HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
-          IntrinsicTypes, PermuteOperands, RequiredExtension));
+          IntrinsicTypes, RequiredExtension));
       if (HasMask) {
         // Create a mask intrinsic
         Optional<RVVTypes> MaskTypes =
@@ -1179,8 +1124,7 @@ void RVVEmitter::createRVVIntrinsics(
             Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
             /*IsMask=*/true, HasMaskedOffOperand, HasVL,
             HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
-            MaskTypes.getValue(), IntrinsicTypes, PermuteOperands,
-            RequiredExtension));
+            MaskTypes.getValue(), IntrinsicTypes, RequiredExtension));
       }
     } // end for Log2LMULList
   } // end for TypeRange
-- 
2.7.4
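
Note on the CodeGen half of this change: the C (and now builtin) operand
order for masked intrinsics is (mask, maskedoff, op1, ..., vl), but the
underlying LLVM intrinsics keep (maskedoff, op1, ..., mask, vl), so
emitCodeGenSwitchBody above emits a std::rotate into the generated builtin
CodeGen that moves the mask from the front to just before VL. A minimal
standalone C++ sketch of that rotation (illustrative only; the generated
code rotates clang's Ops value vector, not strings):

  #include <algorithm>
  #include <cassert>
  #include <string>
  #include <vector>

  int main() {
    // Builtin/C operand order after this patch: mask first, VL last.
    std::vector<std::string> Ops = {"mask", "maskedoff", "op1", "op2", "vl"};

    // Equivalent of the emitted
    //   std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
    // The mask moves to just before VL, yielding the LLVM intrinsic
    // order (maskedoff, op1, op2, mask, vl).
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);

    assert((Ops == std::vector<std::string>{"maskedoff", "op1", "op2", "mask",
                                            "vl"}));
    return 0;
  }

For intrinsics without a VL operand, the emitted rotate uses Ops.end()
instead, sending the mask to the very end of the operand list.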
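
With the builtin order matching the C order, emitMangledFuncDef can emit
every overloaded wrapper, masked included, as a clang_builtin_alias
declaration instead of an inline forwarding function. The shape it produces
in the generated header looks like the following, using the vxor_vx_u32m4_m
case from the tests above as an illustrative instance (the exact prototypes
are generated from riscv_vector.td):

  __attribute__((clang_builtin_alias(__builtin_rvv_vxor_vx_u32m4_m)))
  vuint32m4_t vxor(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1,
                   uint32_t op2, size_t vl);

Because the alias forwards its arguments positionally, this is only correct
now that the builtin takes the mask as its first operand, which is exactly
what the ProtoMaskSeq change above guarantees.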